repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
nuclear-wizard/moose | test/tests/time_integrators/scalar/run.py | 12 | 4487 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
'''
Return the last non-empty row of a CSV file. The whole file is read
line by line, since the csv reader has no way to seek directly to the
last record. See:
http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
'''
with open(csv_filename, 'r') as f:
lastrow = None
for row in csv.reader(f):
if (row != []): # skip blank lines at end of file.
lastrow = row
return lastrow
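# A minimal alternative sketch (an addition, not part of the original script):
# the same "last non-empty row" can be obtained by streaming the rows through a
# one-element deque, which never keeps more than the most recent row in memory.
def get_last_row_via_deque(csv_filename):
    from collections import deque
    with open(csv_filename, 'r') as f:
        rows = deque((row for row in csv.reader(f) if row), maxlen=1)
    return rows[0] if rows else None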
def run_moose(dt, time_integrator):
'''
Function which actually runs MOOSE.
'''
implicit_flag = 'true'
explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
# Set implicit_flag based on TimeIntegrator name
if (time_integrator in explicit_methods):
implicit_flag = 'false'
command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
'Executioner/dt={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'GlobalParams/implicit={}'.format(implicit_flag)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
raise RuntimeError('MOOSE exited with a nonzero return code')
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
last_row = get_last_row('scalar_out.csv')
return float(last_row[1])
#
# Main program
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']
for i in range(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(dt, time_integrator))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
# Compute a linear fit of the last three points; its slope is the observed convergence rate.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
# Make the plot, labelling each curve with its TimeIntegrator name and
# the fitted slope (the observed convergence rate).
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel(r'$\log (\Delta t^{-1})$')
ax1.set_ylabel(r'$\log \|e(T)\|_{L^2}$')
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
OSU-CS-325/Project_Two_Coin_Change | run-files/analysisQ7.py | 1 | 2957 | import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
import datetime
# Import the three change making algorithms
sys.path.insert(0, "../divide-conquer/")
sys.path.insert(0, "../dynamic-programming")
sys.path.insert(0, "../greedy")
from changeslow import changeslow
from changegreedy import changegreedy
from changedp import changedp
### QUESTION 7 ###
def Q7(slow, minChange, maxChange):
lenV = []
runtimeGreedy = []
runtimeDP = []
runtimeSlow = []
numExp = 10
maxRange = 1000
if (slow):
maxRange = 10 # some much smaller number
for i in range(1, maxRange): # V can be of length 1 to (maxRange - 1)
print "\n------ running V length=" + str(i) + "------"
lenV.append(i)
#print "lenV:", lenV
runtimeGreedy.append(0)
runtimeDP.append(0)
runtimeSlow.append(0)
for j in range(numExp): # run numExp experiments for this length of V
print "\n ---- running experiment=" + str(j + 1) + " ----"
coinArray = []
for k in range(i): # generate V of size i [1, rand, ..., rand, max=1 + 5*(maxRange-2)]
if (k == 0):
coinArray.append(1)
else:
randFrom = coinArray[len(coinArray) - 1] + 1
randTo = coinArray[len(coinArray) - 1] + 5
coinArray.append(random.randint(randFrom, randTo))
change = random.randint(minChange, maxChange)
#print " coinArray:", coinArray
#print " change:", change
print " running greedy..."
start = datetime.datetime.now()
_, _ = changegreedy(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeGreedy[i - 1] += delta
print " running DP..."
start = datetime.datetime.now()
_, _ = changedp(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeDP[i - 1] += delta
if (slow):
print " running slow..."
start = datetime.datetime.now()
_, _ = changeslow(coinArray, change)
end = datetime.datetime.now()
delta = end - start
delta = int(delta.total_seconds() * 1000000)
print " " + str(delta)
runtimeSlow[i - 1] += delta
runtimeGreedy[i - 1] /= numExp
runtimeDP[i - 1] /= numExp
if (slow):
runtimeSlow[i - 1] /= numExp
plt.figure(21)
plt.plot(lenV, runtimeGreedy, 'b-', linewidth=2.0, label='Greedy')
plt.plot(lenV, runtimeDP, 'r--', linewidth=2.0, label='DP')
if (slow):
plt.plot(lenV, runtimeSlow, 'g-.', linewidth=2.0, label='Slow')
plt.legend(loc='upper left')
plt.title('Runtime vs len(V[]) for randomized V[] and A')
plt.ylabel('Avg. Runtime (10^-6 sec)')
plt.xlabel('len(V[])')
plt.grid(True)
if (slow):
plt.savefig('img/Q7slow_runtime.png', bbox_inches='tight')
else:
plt.savefig('img/Q7_runtime.png', bbox_inches='tight')
def main():
Q7(False, 100, 100)
#Q7(True)
if __name__ == "__main__":
main()
| mit |
stormvirux/vturra-cli | vturra/asys.py | 1 | 1936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from scipy import stats
# import statsmodels.api as sm
# from numpy.random import randn
import matplotlib as mpl
# import seaborn as sns
# sns.set_color_palette("deep", desat=.6)
mpl.rc("figure", figsize=(8, 4))
def Compavg():
data=Total()
markMax=[]
markAvg=[]
N = 5
ind = np.arange(N)
width = 0.35
fig = plt.figure()
ax = fig.add_subplot(111)
markMax.extend((data["Total"].max(),data["Total.1"].max(),data["Total.2"].max(),data["Total.3"].max(),data["Total.4"].max()))
markAvg.extend((data["Total"].mean(),data["Total.1"].mean(),data["Total.2"].mean(),data["Total.3"].mean(),data["Total.4"].mean()))
rects1 = ax.bar(ind, markMax, width, color='black')
rects2 = ax.bar(ind+width, markAvg, width, color='green')
ax.set_xlim(-width,len(ind)+width)
ax.set_ylim(0,120)
ax.set_ylabel('Marks')
ax.set_title('Max, Mean and Your Marks')
xTickMarks = ['Subject'+str(i) for i in range(1,6)]
ax.set_xticks(ind+width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=10, fontsize=10)
ax.legend( (rects1[0], rects2[0]), ('Max', 'Mean') )
plt.show()
def compSub():
# max_data = np.r_[data["Total"]].max()
# bins = np.linspace(0, max_data, max_data + 1)
data=Total()
plt.hist(data['Total'],linewidth=0, alpha=.7)
plt.hist(data['Total.1'],linewidth=0,alpha=.7)
plt.hist(data['Total.2'],linewidth=0,alpha=.7)
plt.hist(data['Total.3'],linewidth=0,alpha=.7)
plt.hist(data['Total.4'],linewidth=0,alpha=.7)
plt.title("Total marks Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
def Total():
data=pd.read_csv("output10cs.csv")
df3=data[['Total','Total.1','Total.2','Total.3','Total.4','Total.5','Total.6','Total.7']]
data["Main Total"]=df3.sum(axis=1)
data = data.dropna()
data = data.reset_index(drop=True)
return data
#compSub()
# Compavg()
| mit |
abhishekkrthakur/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
sssundar/Drone | rotation/viz.py | 1 | 5332 | # Python script to visualize rotation about a non-body axis.
# Let the lab frame be the inertial frame S.
# Let the origin of the rigid body be O, in the inertial frame S'.
# Let r_ss' be the vector from S to S'.
# Let the body frame relative to O be S''.
# Consider a fixed point on the body, r_s' in S', and r_s'' in S''.
# Assume the body is subject to zero external torques.
# It must be rotating about a fixed axis, n, by Euler's rotation theorem.
# It must have a constant angular velocity about that axis by d/dt L = sum(T_external) = 0 and L = Jw about the rotation axis.
# Let R be the rotation matrix mapping a vector in S'' to S', with inverse R^T
# We know r_s' = R r_s''
# We know d/dt r_s' = (dR/dt R^T) * (R r_s'') = (dR/dt R^T) r_s'
# Therefore we expect (dR/dt R^T) to be the operator (w x) in the S' frame.
# The goal of this script is to visualize this.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy as np
from numpy import pi as pi
from numpy import cos as c
from numpy import sin as s
from numpy import dot as dot
from numpy import transpose as transpose
# The axis phi is a rotation about the z axis in the body frame (yaw)
# The axis theta is a rotation about the y axis in the phi-rotated body frame (pitch)
# The axis psi is a rotation about the x axis in the phi, theta-rotated body frame (roll)
def R(phi, theta, psi):
R = np.zeros((3,3))
R[0,0] = c(phi)*c(theta)
R[1,0] = s(phi)*c(theta)
R[2,0] = -s(theta)
R[0,1] = -s(phi)*c(psi) + c(phi)*s(theta)*s(psi)
R[1,1] = c(phi)*c(psi) + s(phi)*s(theta)*s(psi)
R[2,1] = c(theta)*s(psi)
R[0,2] = s(phi)*s(psi) + c(phi)*s(theta)*c(psi)
R[1,2] = -c(phi)*s(psi) + s(phi)*s(theta)*c(psi)
R[2,2] = c(theta)*c(psi)
return R
# Rotate z-axis (0,0,1) by pi radians about x-axis. Should end up at (0,0,-1) cutting across y.
# Rotate (0,0,-1) by pi radians about y-axis. Should end up at (0,0,1) again, cutting across x.
# Try both at the same time. Should still end up at (0,0,1).
def test_R():
e3_spp = np.array((0,0,1))
vectors = []
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,0,k), e3_spp))
e3_spp = vectors[-1]
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,k,0), e3_spp))
e3_spp = vectors[-1]
for k in np.linspace(0,pi,100):
vectors.append(dot(R(0,k,k), e3_spp))
xs = [k[0] for k in vectors]
ys = [k[1] for k in vectors]
zs = [k[2] for k in vectors]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(xs=xs,ys=ys,zs=zs)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
# Sets entries with magnitude below epsilon to zero.
# Prints the result with format %6.2f.
def sanitize_matrix(A):
print ""
epsilon = 0.001
for r in xrange(3):
text = ""
for c in xrange(3):
if abs(A[r, c]) < epsilon:
A[r,c] = 0
text += "%6.2f,\t" % A[r,c]
print text[:-2]
print ""
def sanitize_vector(a):
print ""
epsilon = 0.001
text = ""
for r in xrange(3):
if abs(a[r]) < epsilon:
a[r] = 0
text += "%6.2f,\t" % a[r]
print text[:-2]
print ""
def vectorize(W):
v = np.zeros(3)
v[0] = W[1,0]
v[1] = W[0,2]
v[2] = W[2,1]
return v
# This is the (w x) operator, W, with respect to changing body yaw, pitch, and roll.
# It is dR/dt R^T. The arguments are the current Euler angles and their time derivatives.
def W(phi, theta, psi, dphi, dtheta, dpsi):
Rp = np.zeros((3,3))
Rp[0,0] = (-s(phi)*dphi)*c(theta)
Rp[0,0] += c(phi)*(-s(theta)*dtheta)
Rp[1,0] = (c(phi)*dphi)*c(theta)
Rp[1,0] += s(phi)*(-s(theta)*dtheta)
Rp[2,0] = -c(theta)*dtheta
Rp[0,1] = (-c(phi)*dphi)*c(psi)
Rp[0,1] += -s(phi)*(-s(psi)*dpsi)
Rp[0,1] += (-s(phi)*dphi)*s(theta)*s(psi)
Rp[0,1] += c(phi)*(c(theta)*dtheta)*s(psi)
Rp[0,1] += c(phi)*s(theta)*(c(psi)*dpsi)
Rp[1,1] = (-s(phi)*dphi)*c(psi)
Rp[1,1] += c(phi)*(-s(psi)*dpsi)
Rp[1,1] += (c(phi)*dphi)*s(theta)*s(psi)
Rp[1,1] += s(phi)*(c(theta)*dtheta)*s(psi)
Rp[1,1] += s(phi)*s(theta)*(c(psi)*dpsi)
Rp[2,1] = (-s(theta)*dtheta)*s(psi)
Rp[2,1] += c(theta)*(c(psi)*dpsi)
Rp[0,2] = (c(phi)*dphi)*s(psi)
Rp[0,2] += s(phi)*(c(psi)*dpsi)
Rp[0,2] += (-s(phi)*dphi)*s(theta)*c(psi)
Rp[0,2] += c(phi)*(c(theta)*dtheta)*c(psi)
Rp[0,2] += c(phi)*s(theta)*(-s(psi)*dpsi)
Rp[1,2] = (s(phi)*dphi)*s(psi)
Rp[1,2] += -c(phi)*(c(psi)*dpsi)
Rp[1,2] += (c(phi)*dphi)*s(theta)*c(psi)
Rp[1,2] += s(phi)*(c(theta)*dtheta)*c(psi)
Rp[1,2] += s(phi)*s(theta)*(-s(psi)*dpsi)
Rp[2,2] = (-s(theta)*dtheta)*c(psi)
Rp[2,2] += c(theta)*(-s(psi)*dpsi)
w_i = vectorize(dot(Rp, transpose(R(phi,theta,psi))))
w_b = dot(transpose(R(phi,theta,psi)), w_i)
return (w_i, w_b)
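# Added cross-check sketch (not part of the original script): compare W() above
# with a finite-difference approximation of dR/dt R^T along the Euler-angle
# trajectory. The two returned vectors should agree to within O(h).
def sketch_check_W_finite_difference(h=1e-6):
    phi, theta, psi = 0.3, -0.2, 0.7
    dphi, dtheta, dpsi = 1.1, -0.4, 0.9
    Rdot = (R(phi + dphi*h, theta + dtheta*h, psi + dpsi*h) -
            R(phi - dphi*h, theta - dtheta*h, psi - dpsi*h)) / (2*h)
    w_fd = vectorize(dot(Rdot, transpose(R(phi, theta, psi))))
    w_i, _ = W(phi, theta, psi, dphi, dtheta, dpsi)
    return w_fd, w_i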
def test_W():
# Is the effective w for a rotation of x rad/s about ek just.. ek*x,
# regardless of the angle about axis ek? We expect W = -W^T as well.
# sanitize_matrix(W(3*pi/12,0,0,2*pi,0,0)[0])
# sanitize_matrix(W(0,3*pi/12,0,0,2*pi,0)[0])
# sanitize_matrix(W(0,0,3*pi/12,0,0,2*pi)[0])
# Let's see what it looks like once we've rotated a bit.
# It's still skew antisymmetric with zero trace! This looks like the operation (w x)!!!!
phi, theta, psi = (pi/4, 3*pi/12, -pi)
w_i, w_b = W(phi, theta, psi, pi, 2*pi, 3*pi)
def Main():
test_W()
if __name__ == "__main__":
Main()
| gpl-3.0 |
Dwii/Master-Thesis | implementation/Palabos/cavity_benchmark/plot_benchmark.py | 1 | 1854 | # Display a list of *.dat files in a bar chart.
# Based on an example from https://chrisalbon.com/python/matplotlib_grouped_bar_plot.html
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
if len(sys.argv) > 3 and (len(sys.argv)-3) % 2 :
print("usage: python3 {0} <benchmark> <image path> (<dat1> <legend1> [<dat2> <legend2>] .. [<datN> <legendN>] ) ".format(os.path.basename(sys.argv[0])))
exit(1)
benchmark = sys.argv[1]
image_path = sys.argv[2]
groups = (len(sys.argv)-3)/2
# Load benchark
domains = ()
nb_setups = 0
for line in open(benchmark,'r'):
n, snx, sny, snz = line.split()
domains += ( r"{0}$^3$".format(snx), ) #+= ( "{0}x{1}x{2}".format(snx, sny, snz), )
nb_setups += 1
# Setting the positions and width for the bars
pos = list(range(nb_setups))
width = 1 / (groups+2)
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
prop_iter = iter(plt.rcParams['axes.prop_cycle'])
legends = ()
maxLups = 0
for i, argi in enumerate(range(3, len(sys.argv), 2)):
mlups = np.array(list(map(float, open(sys.argv[argi])))) / 1E6
legends += ( sys.argv[argi+1], )
maxLups = max(maxLups, max(mlups))
plt.bar([p + width*i for p in pos],
mlups,
width,
alpha=0.5,
color=next(prop_iter)['color'])
# Set the y axis label
ax.set_ylabel('MLUPS')
ax.set_xlabel('Taille du sous-domaine')
# Set the chart's title
#ax.set_title(title)
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(domains)
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, maxLups] )
# Adding the legend and showing the plot
plt.legend(legends, loc='upper center')
ax.yaxis.grid()
plt.tight_layout()
plt.savefig(image_path)
plt.show() | mit |
wanggang3333/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations to keep the formula readable
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations to keep the formula readable
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(Y)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
AxelTLarsson/robot-localisation | robot_localisation/main.py | 1 | 6009 | """
This module contains the logic to run the simulation.
"""
import sys
import os
import argparse
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from robot_localisation.grid import Grid, build_transition_matrix
from robot_localisation.robot import Robot, Sensor
from robot_localisation.hmm_filter import FilterState
def help_text():
"""
Return a helpful text explaining usage of the program.
"""
return """
------------------------------- HMM Filtering ---------------------------------
Type a command to get started. Type 'quit' or 'q' to quit.
Valid commands (all commands are case insensitive):
ENTER move the robot one step further in the simulation,
will also output current pose and estimated
position of the robot
help show this help text
show T show the transition matrix T
show f show the filter column vector
show O show the observation matrix
quit | q quit the program
-------------------------------------------------------------------------------
"""
def main():
parser = argparse.ArgumentParser(description='Robot localisation with HMM')
parser.add_argument(
'-r', '--rows',
type=int,
help='the number of rows on the grid, default is 4',
default=4)
parser.add_argument(
'-c', '--columns',
type=int,
help='the number of columns on the grid, default is 4',
default=4)
args = parser.parse_args()
# Initialise the program
size = (args.rows, args.columns)
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
sensor_value = None
obs = None
print(help_text())
print("Grid size is {} x {}".format(size[0], size[1]))
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
np.set_printoptions(linewidth=1000)
# Main loop
while True:
user_command = str(input('> '))
if user_command.upper() == 'QUIT' or user_command.upper() == 'Q':
break
elif user_command.upper() == 'HELP':
print(help_text())
elif user_command.upper() == 'SHOW T':
print(the_T_matrix)
elif user_command.upper() == 'SHOW F':
print(the_filter.belief_matrix)
elif user_command.upper() == 'SHOW O':
print(obs)
elif not user_command:
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
else:
print("Unknown command!")
def manhattan(pos1, pos2):
"""
Calculate the Manhattan distance between pos1 and pos2.
"""
x1, y1 = pos1
x2, y2 = pos2
return abs(x1-x2) + abs(y1-y2)
def automated_run():
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 7))
navg = 20
nsteps = 10
for size in (2, 2), (3, 3), (4, 4), (5, 5), (10, 10):
avg_distances = np.zeros(shape=(nsteps+1,))
for n in range(navg):
distances = list()
none_values = list()
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
# get the manhattan distance at the start
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
for i in range(nsteps):
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
if sensor_value is None:
none_values.append(i) # keep track of where None was returned
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
avg_distances += np.array(distances)
avg_distances /= navg
base_line, = plt.plot(avg_distances, label="Grid size {}".format(size))
# for point in none_values:
# plt.scatter(point, distances[point], marker='o',
# color=base_line.get_color(), s=40)
plt.legend()
plt.xlim(0, nsteps)
plt.ylim(0,)
plt.ylabel("Manhattan distance")
plt.xlabel("Steps")
plt.title("Manhattan distance from true position and inferred position \n"
"from the hidden Markov model (average over %s runs)" % navg)
fig.savefig("automated_run.png")
plt.show()
if __name__ == '__main__':
main()
# automated_run()
| mit |
mutirri/bokeh | bokeh/cli/core.py | 42 | 16025 | from __future__ import absolute_import, print_function
import sys, os
from six.moves.urllib import request as urllib2
from six.moves import cStringIO as StringIO
import pandas as pd
try:
import click
is_click = True
except ImportError:
is_click = False
from . import help_messages as hm
from .utils import (get_chart_params, get_charts_mapping,
get_data_series, keep_source_input_sync, get_data_from_url)
from .. import charts as bc
from ..charts import utils as bc_utils
from bokeh.models.widgets import Button
# Define a mapping to connect chart types supported arguments and chart classes
CHARTS_MAP = get_charts_mapping()
if is_click:
@click.command()
@click.option('--input', 'input_source', default=None,help=hm.HELP_INPUT)
@click.option('--output', default='file://cli_output.html', help=hm.HELP_OUTPUT)
@click.option('--title', default='Bokeh CLI')
@click.option('--chart_type', default='Line')
@click.option('--index', default='', help=hm.HELP_INDEX)
@click.option('--series', default='', help=hm.HELP_SERIES)
@click.option('--palette')
@click.option('--buffer', default='f', help=hm.HELP_BUFFER)
@click.option('--sync_with_source', default=False)
@click.option('--update_ranges', 'update_ranges', flag_value=True,
default=False)
@click.option('--legend', 'show_legend', flag_value=True,
default=False)
@click.option('--window_size', default='0', help=hm.HELP_WIN_SIZE)
@click.option('--map', 'map_', default=None)
@click.option('--map_zoom', 'map_zoom', default=12)
@click.option('--map_layer', 'map_layer', default="hybrid")
@click.option('--smart_filters', 'smart_filters', flag_value=True,
default=False)
def cli(input_source, output, title, chart_type, series, palette, index,
buffer, sync_with_source, update_ranges, show_legend, window_size,
map_, smart_filters, map_zoom, map_layer):
"""Bokeh Command Line Tool is a minimal client to access high level plotting
functionality provided by bokeh.charts API.
Examples:
>> python bokeh-cli.py --title "My Nice Plot" --series "High,Low,Close"
--chart_type "Line" --palette Reds --input sample_data/stocks_data.csv
>> cat sample_data/stocks_data.csv | python bokeh-cli.py --buffer t
>> python bokeh-cli.py --help
"""
cli = CLI(
input_source, output, title, chart_type, series, palette, index, buffer,
sync_with_source, update_ranges, show_legend, window_size, map_,
smart_filters, map_zoom, map_layer
)
cli.run()
else:
def cli():
print("The CLI tool requires click to be installed")
class CLI(object):
"""This is the Bokeh Command Line Interface class and it is in
charge of providing a very high level access to bokeh charts and
extends it with functionality.
"""
def __init__(self, input_source, output, title, chart_type, series, palette,
index, buffer, sync_with_source, update_ranges, show_legend,
window_size, map_, smart_filters, map_zoom, map_layer):
"""Args:
input_source (str): path to the series data file (i.e.:
/source/to/my/data.csv)
NOTE: this can be either a path to a local file or an url
output (str, optional): Selects the plotting output, which
could either be sent to an html file or a bokeh server
instance. Syntax convention for this option is as follows:
<output_type>://<type_arg>
where:
- output_type: 'file' or 'server'
- 'file' type options: path_to_output_file
- 'server' type options syntax: docname[@url][@name]
Defaults to: --output file://cli_output.html
Examples:
--output file://cli_output.html
--output file:///home/someuser/bokeh_rocks/cli_output.html
--output server://clidemo
Default: file://cli_output.html.
title (str, optional): the title of your chart.
Default: None.
chart_type (str, optional): charts classes to use to consume and
render the input data.
Default: Line.
series (str, optional): Name of the series from the input source
to include in the plot. If not specified all source series
will be included.
Defaults to None.
palette (str, optional): name of the colors palette to use.
Default: None.
index (str, optional): Name of the data series to be used as the
index when plotting. By default the first series found on the
input file is taken
Default: None
buffer (str, optional): if is `t` reads data source as string from
input buffer using StringIO(sys.stdin.read()) instead of
reading from a file or an url.
Default: "f"
sync_with_source (bool, optional): if True keep the charts source
created on bokeh-server sync'ed with the source acting like
`tail -f`.
Default: False
window_size (int, optional): show up to N values then start dropping
off older ones
Default: '0'
Attributes:
source (obj): datasource object for the created chart.
chart (obj): created chart object.
"""
self.input = input_source
self.series = series
self.index = index
self.last_byte = -1
self.sync_with_source = sync_with_source
self.update_ranges = update_ranges
self.show_legend = show_legend
self.window_size = int(window_size)
self.smart_filters = smart_filters
self.map_options = {}
self.current_selection = []
self.source = self.get_input(input_source, buffer)
# get the charts specified by the user
self.factories = create_chart_factories(chart_type)
if palette:
print ("Sorry, custom palettes not supported yet, coming soon!")
# define charts init parameters specified from cmd line and create chart
self.chart_args = get_chart_params(
title, output, show_legend=self.show_legend
)
if self.smart_filters:
self.chart_args['tools'] = "pan,wheel_zoom,box_zoom,reset,save," \
"box_select,lasso_select"
if map_:
self.map_options['lat'], self.map_options['lng'] = \
[float(x) for x in map_.strip().split(',')]
self.map_options['zoom'] = int(map_zoom)
# Yeah, unfortunate namings.. :-)
self.map_options['map_type'] = map_layer
def on_selection_changed(self, obj, attrname, old, new):
self.current_selection = new
def limit_source(self, source):
""" Limit source to cli.window_size, if set.
Args:
source (mapping): dict-like object
"""
if self.window_size:
for key in source.keys():
source[key] = source[key][-self.window_size:]
def run(self):
""" Start the CLI logic creating the input source, data conversions,
chart instances to show and all other niceties provided by CLI
"""
try:
self.limit_source(self.source)
children = []
if self.smart_filters:
copy_selection = Button(label="copy current selection")
copy_selection.on_click(self.on_copy)
children.append(copy_selection)
self.chart = create_chart(
self.series, self.source, self.index, self.factories,
self.map_options, children=children, **self.chart_args
)
self.chart.show()
self.has_ranged_x_axis = 'ranged_x_axis' in self.source.columns
self.columns = [c for c in self.source.columns if c != 'ranged_x_axis']
if self.smart_filters:
for chart in self.chart.charts:
chart.source.on_change('selected', self, 'on_selection_changed')
self.chart.session.poll_document(self.chart.doc)
except TypeError:
if not self.series:
series_list = ', '.join(self.chart.values.keys())
print(hm.ERR_MSG_TEMPL % series_list)
raise
if self.sync_with_source:
keep_source_input_sync(self.input, self.update_source, self.last_byte)
def on_copy(self, *args, **kws):
print("COPYING CONTENT!")
# TODO: EXPERIMENTAL!!! THIS EXPOSES MANY SECURITY ISSUES AND SHOULD
# BE REMOVED ASAP!
txt = ''
for rowind in self.current_selection:
row = self.source.iloc[rowind]
txt += u"%s\n" % (u",".join(str(row[c]) for c in self.columns))
os.system("echo '%s' | pbcopy" % txt)
def update_source(self, new_source):
""" Update self.chart source with the new data retrieved from
new_source. It is done by parsing the new source line,
trasforming it to data to be appended to self.chart source
updating it on chart.session and actually updating chart.session
objects.
Args:
new_source (str): string that contains the new source row to
read to the current chart source.
"""
ns = pd.read_csv(StringIO(new_source), names=self.columns)
len_source = len(self.source)
if self.has_ranged_x_axis:
ns['ranged_x_axis'] = [len_source]
self.index = 'ranged_x_axis'
ns.index = [len_source]
self.source = pd.concat([self.source, ns])
# TODO: This should be replaced with something that just computes
# the new data and source
fig = create_chart(self.series, ns, self.index, self.factories,
self.map_options, **self.chart_args)
for i, _c in enumerate(fig.charts):
if not isinstance(_c, bc.GMap):
# TODO: nested charts are getting ridiculous. Need a better
# interface for charts :-)
scc = self.chart.charts[i]
for k, v in _c.source.data.items():
scc.source.data[k] = list(scc.source.data[k]) + list(v)
self.limit_source(scc.source.data)
chart = scc.chart
chart.session.store_objects(scc.source)
if self.update_ranges:
plot = chart.plot
plot.y_range.start = min(
plot.y_range.start, _c.chart.plot.y_range.start
)
plot.y_range.end = max(
plot.y_range.end, _c.chart.plot.y_range.end
)
plot.x_range.start = min(
plot.x_range.start, _c.chart.plot.x_range.start
)
plot.x_range.end = max(
plot.x_range.end, _c.chart.plot.x_range.end
)
chart.session.store_objects(plot)
def get_input(self, filepath, buffer):
"""Parse received input options. If buffer is not false (=='f') if
gets input data from input buffer othewise opens file specified in
sourcefilename,
Args:
filepath (str): path to the file to read from to retrieve data
buffer (str): if == 't' reads data from input buffer
Returns:
string read from filepath/buffer
"""
if buffer != 'f':
filepath = StringIO(sys.stdin.read())
elif filepath is None:
msg = "No Input! Please specify --source_filename or --buffer t"
raise IOError(msg)
else:
if filepath.lower().startswith('http'):
# Create a request for the given URL.
request = urllib2.Request(filepath)
data = get_data_from_url(request)
self.last_byte = len(data)
else:
filepath = open(filepath, 'r').read()
self.last_byte = len(filepath)
filepath = StringIO(filepath)
source = pd.read_csv(filepath)
return source
def create_chart(series, source, index, factories, map_options=None, children=None, **args):
"""Create charts instances from types specified in factories using
data series names, source, index and args
Args:
series (list(str)): list of strings specifying the names of the
series to keep from source
source (DataFrame): pandas DataFrame with the data series to be
plotted
index (str): name of the series of source to be used as index.
factories (list(ChartObject)): list of chart classes to be used
to create the charts to be plotted
**args: arguments to pass to the charts when creating them.
"""
if not index:
# if no index was specified as for x axis
# we take a default "range"
index = 'ranged_x_axis'
# add the new x range data to the source dataframe
source[index] = range(len(source[source.columns[0]]))
indexes = [x for x in index.split(',') if x]
data_series = get_data_series(series, source, indexes)
# parse queries to create the charts..
charts = []
for chart_type in factories:
if chart_type == bc.GMap:
if not map_options or \
not all([x in map_options for x in ['lat', 'lng']]):
raise ValueError("GMap Charts need lat and lon coordinates!")
all_args = dict(map_options)
all_args.update(args)
chart = chart_type(**all_args)
else:
if chart_type == bc.TimeSeries:
# in case the x axis type is datetime that column must be converted to
# datetime
data_series[index] = pd.to_datetime(source[index])
elif chart_type == bc.Scatter:
if len(indexes) == 1:
scatter_ind = [x for x in data_series.pop(indexes[0]).values]
scatter_ind = [scatter_ind] * len(data_series)
else:
scatter_ind = []
for key in indexes:
scatter_ind.append([x for x in data_series.pop(key).values])
if len(scatter_ind) != len(data_series):
err_msg = "Number of multiple indexes must be equals" \
" to the number of series"
raise ValueError(err_msg)
for ind, key in enumerate(data_series):
values = data_series[key].values
data_series[key] = zip(scatter_ind[ind], values)
chart = chart_type(data_series, **args)
if hasattr(chart, 'index'):
chart.index = index
charts.append(chart)
fig = bc_utils.Figure(*charts, children=children, **args)
return fig
def create_chart_factories(chart_types):
"""Receive the chart type(s) specified by the user and build a
list of the their related functions.
Args:
series (str): string that contains the name of the
chart classes to use when creating the chart, separated by `,`
example:
>> create_chart_factories('Line,step')
[Line, Step]
"""
return [get_chart(name) for name in chart_types.split(',') if name]
def get_chart(class_name):
"""Return the bokeh class specified in class_name.
Args:
class_name (str): name of the chart class to return (i.e.: Line|step)
"""
return CHARTS_MAP[class_name.strip().lower()]
if __name__ == '__main__':
cli()
| bsd-3-clause |
yukisakurai/hhana | mva/plotting/utils.py | 1 | 4190 | import ROOT
from itertools import izip
from matplotlib import cm
from rootpy.plotting.style.atlas.labels import ATLAS_label
from rootpy.memory.keepalive import keepalive
from .. import ATLAS_LABEL
def set_colors(hists, colors='jet'):
if isinstance(colors, basestring):
colors = cm.get_cmap(colors, len(hists))
if hasattr(colors, '__call__'):
for i, h in enumerate(hists):
color = colors((i + 1) / float(len(hists) + 1))
h.SetColor(color)
else:
for h, color in izip(hists, colors):
h.SetColor(color)
def category_lumi_atlas(pad, category_label=None,
data_info=None, atlas_label=None,
textsize=20):
left, right, bottom, top = pad.margin_pixels
height = float(pad.height_pixels)
# draw the category label
if category_label:
label = ROOT.TLatex(
1. - pad.GetRightMargin(),
1. - (textsize - 2) / height,
category_label)
label.SetNDC()
label.SetTextFont(43)
label.SetTextSize(textsize)
label.SetTextAlign(31)
with pad:
label.Draw()
keepalive(pad, label)
# draw the luminosity label
if data_info is not None:
plabel = ROOT.TLatex(
1. - pad.GetLeftMargin() - 0.25,
1. - (top + textsize + 60) / height,
str(data_info))
plabel.SetNDC()
plabel.SetTextFont(43)
plabel.SetTextSize(textsize)
plabel.SetTextAlign(31)
with pad:
plabel.Draw()
keepalive(pad, plabel)
# draw the ATLAS label
if atlas_label is not False:
label = atlas_label or ATLAS_LABEL
ATLAS_label(pad.GetLeftMargin() + 0.03,
1. - (top + textsize + 15) / height,
sep=0.132, pad=pad, sqrts=None,
text=label,
textsize=textsize)
pad.Update()
pad.Modified()
def label_plot(pad, template, xaxis, yaxis,
ylabel='Events', xlabel=None,
units=None, data_info=None,
category_label=None,
atlas_label=None,
extra_label=None,
extra_label_position='left',
textsize=22):
# set the axis labels
binw = list(template.xwidth())
binwidths = list(set(['%.2g' % w for w in binw]))
if units is not None:
if xlabel is not None:
xlabel = '%s [%s]' % (xlabel, units)
if ylabel and len(binwidths) == 1 and binwidths[0] != '1':
# constant width bins
ylabel = '%s / %s %s' % (ylabel, binwidths[0], units)
elif ylabel and len(binwidths) == 1 and binwidths[0] != '1':
ylabel = '%s / %s' % (ylabel, binwidths[0])
if ylabel:
yaxis.SetTitle(ylabel)
if xlabel:
xaxis.SetTitle(xlabel)
left, right, bottom, top = pad.margin_pixels
height = float(pad.height_pixels)
category_lumi_atlas(pad, category_label, data_info, atlas_label)
# draw the extra label
if extra_label is not None:
if extra_label_position == 'left':
label = ROOT.TLatex(pad.GetLeftMargin() + 0.03,
1. - (top + 2 * (textsize + 40)) / height,
extra_label)
else: # right
label = ROOT.TLatex(1. - pad.GetRightMargin() - 0.03,
1. - (top + 2 * (textsize + 40)) / height,
extra_label)
label.SetTextAlign(31)
label.SetNDC()
label.SetTextFont(43)
label.SetTextSize(textsize)
with pad:
label.Draw()
keepalive(pad, label)
pad.Update()
pad.Modified()
# class rootpy.plotting.Legend(
# entries, pad=None,
# leftmargin=0.5, topmargin=0.05, rightmargin=0.05,
# entryheight=0.06, entrysep=0.02, margin=0.3,
# textfont=None, textsize=None, header=None)
def legend_params(position, textsize):
return dict(
leftmargin=0.48, topmargin=0.03, rightmargin=0.05,
entryheight=0.05,
entrysep=0.01,
margin=0.25,
textsize=textsize)
| gpl-3.0 |
Achuth17/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
taotaocoule/stock | spider/data/bond.py | 1 | 1159 | # Treasury bond index: id=0000121; http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=0000121&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518154947301=fsData1518154947301
# Shanghai corporate bonds: id=0000131; http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=0000131&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518156740923=fsData1518156740923
# Shenzhen corporate bonds: id=3994812; http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id=3994812&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518156947700=fsData1518156947700
import urllib.request
import pandas as pd
import json
class Bond(object):
"""docstring for Bond"""
def __init__(self):
self.index = {
'国债指数':'0000121',   # treasury bond index
'沪市企业债':'0000131',  # Shanghai corporate bonds
'深圳企业债':'3994812'   # Shenzhen corporate bonds
}
def bond_index(self,id):
url = r'http://pdfm2.eastmoney.com/EM_UBG_PDTI_Fast/api/js?id={}&TYPE=k&js=(x)&rtntype=5&isCR=false&fsData1518154947301=fsData1518154947301'.format(id)
raw = json.loads(urllib.request.urlopen(url).read())
head = ['日期','开盘','收盘','最高','最低','成交量','成交金额','振幅']  # date, open, close, high, low, volume, turnover, amplitude
return pd.DataFrame(list(map(lambda x:x.split(','),raw['data'])),columns=head) | mit |
shyamalschandra/scikit-learn | sklearn/neighbors/nearest_centroid.py | 38 | 7356 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
# format is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
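# Hedged illustration (added for exposition, not part of scikit-learn): the
# shrink_threshold branch of ``fit`` soft-thresholds each centroid deviation,
# shrinking it towards zero and zeroing entries whose sign would flip.
# A standalone sketch of that rule:
def _soft_threshold_sketch(deviation, shrink_threshold):
    # deviation: ndarray of (centroid - dataset centroid) / (m * s) values
    signs = np.sign(deviation)
    shrunk = np.abs(deviation) - shrink_threshold
    shrunk[shrunk < 0] = 0
    return shrunk * signs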
| bsd-3-clause |
Loisel/tmr3 | tmr.py | 1 | 15096 | #!/usr/bin/python
"""
A module to calculate the current, the conductance and the TMR from
a set of rate arrays.
The rate arrays are supposed to be stored in an h5 file in the job directory.
The result is stored in an h5 file. The name of the dataset contains all
parameters. They are also stored as attributes in the dataset.
The conductance in the two lead configurations (parallel/anti-parallel)
is stored in arrays in the dataset.
Usage:
./tmr.py <jobname>
"""
import numpy as np
from numpy import linalg
import time
import sys
import getopt
import h5py
import os
# We are picky about possible floating point overflows
# to avoid calculating NaNs
np.seterr(divide="raise")
np.seterr(invalid="raise")
# A helper module to calculate the populations.
import pop
# The configuration module
import cfg
# path to the dat directory
datpath = "dat/"
# name of the temporary file where the rates are stored
ratefile = "running_calc.h5"
# name of the h5 file to store the conductance for the two configurations
# and the configuration parameters.
hdffile = "simdata_new.h5"
def save_hdf5(fname,G_P,G_AP):
"""
Store the conductance and the configuration to the h5 file.
Args:
fname: filename of the h5 file
G_P: the conductance for leads with parallel magnetization
G_AP: the conductance for leads with anti-parallel magnetization
"""
print "Shape of GP {}".format(G_P.shape)
fileh = h5py.File(fname,"a")
# Note that the selection of parameters to construct the name of the
# dataset should be chosen such that this string is unique!
# That is, it should contain all running parameters.
dset_name = "G={}_kbT={}_Ec={}_E0={}_Pol={}_PolOrb={}_SO={}_tau={}_DS={}_B_P={}_B_AP={}_B_ORB_P={}_B_ORB_AP={}_W_e={}_W_0={}".format(cfg.conf['G_scale'],cfg.conf['kBT'],cfg.conf['E_C'],cfg.conf['E_0'],cfg.conf['Pol'],cfg.conf['OrbPol'],cfg.conf['SO'],cfg.conf['tau_r'],cfg.conf['D_S_factor'],cfg.conf['B_P'],cfg.conf['B_AP'],cfg.conf['B_ORB_P'],cfg.conf['B_ORB_AP'],cfg.conf['W_E'],cfg.conf['W_0'])
try:
# we create the dataset
dset = fileh.create_dataset(dset_name,data=np.vstack((G_P,G_AP)))
# and store the config attributes
dset.attrs['alpha'] = cfg.conf['ALPHA']
dset.attrs['temperature'] = cfg.conf['kBT']
dset.attrs['coupling'] = cfg.conf['G_scale']
dset.attrs['electron_number'] = cfg.conf['N_0']
dset.attrs['charging_energy'] = cfg.conf['E_C']
dset.attrs['level_spacing'] = cfg.conf['E_0']
dset.attrs['polarization_spin'] = cfg.conf['Pol']
dset.attrs['polarization_orbit'] = cfg.conf['OrbPol']
dset.attrs['spinorbit'] = cfg.conf['SO']
dset.attrs['stonershift'] = cfg.conf['D_S_factor']
dset.attrs['tau_r'] = cfg.conf['tau_r']
dset.attrs['vg_min'] = cfg.conf['V_g_min']
dset.attrs['vg_max'] = cfg.conf['V_g_max']
dset.attrs['b_p'] = cfg.conf['B_P']
dset.attrs['b_ap'] = cfg.conf['B_AP']
dset.attrs['b_orb_p'] = cfg.conf['B_ORB_P']
dset.attrs['b_orb_ap'] = cfg.conf['B_ORB_AP']
dset.attrs['w_0'] = cfg.conf['W_0']
dset.attrs['w_e'] = cfg.conf['W_E']
dset.attrs['timestamp'] = time.time()
except KeyError:
# If the choice was not unique we complain but continue.
print "Dataset exists."
fileh.close()
def eval_DENKER(GM,GP,configuration):
"""
Evaluate the density matrix kernel using the in- and out-tunneling rates.
Args:
GM,GP: numpy arrays containing in- and out-tunneling rates
in the order of cfg.TLIST.
configuration: integer determining parallel (0) or anti-parallel(1)
configuration
Returns:
the density matrix as a square 2-d numpy array that is NP**2 in size,
where NP is the number of states in the groundstatespace.
"""
# we get a view on the transition list and, for simplicity, its transpose
TLIST = cfg.TLIST[configuration]
TLIST_T = np.transpose(TLIST)
# from all transitions we extract all groundstates in the statespace
# this is probably a complicated way to do it
PLIST = list(set(TLIST_T[0]).union(TLIST_T[1]))
# ... and sort it by index
PLIST.sort()
# the number of groundstates
NP = len(PLIST)
# let's create an empty density matrix
ME = np.zeros((NP,NP))
# we create a version of the transition list that does not contain
# the indices in terms of the energy array (see cfg.py), but
# in terms of the number in the state list (plist)
# (the transition list can then be used to denote non-zero matrix elements)
TMP = np.copy(TLIST)
for idx,val in enumerate(PLIST):
TMP[TLIST == val] = idx
# We calculate diagonal elements of the density matrix:
# TLIST_T[1] == num selects the correct in-tunneling rates for the
# state with label num
# have a look at numpy.where to understand this line
for idx,num in enumerate(PLIST):
ME[idx,idx] = -np.sum(np.where(TLIST_T[1] == num,GP,0.)) - np.sum(np.where(TLIST_T[0] == num,GM,0.))
# for the off diagonal elements we can directly use the generated TMP
# transition list
for k,tup in enumerate(TMP):
ME[tup[0],tup[1]] = GP[k]
ME[tup[1],tup[0]] = GM[k]
# print "tup: {} and matrix element {}".format(tup,ME[tuple(tup)])
return ME
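# Hedged illustration (added for clarity, not part of the original script):
# the TMP re-indexing above maps energy-array state labels to their position
# in the sorted groundstate list, so transitions can address matrix elements
# directly. The toy transition list below is hypothetical.
def _demo_reindex_tlist():
    TLIST = np.array([[3, 7], [7, 9], [3, 9]])  # transitions between states 3, 7, 9
    PLIST = sorted(set(TLIST.flatten()))        # -> [3, 7, 9]
    TMP = np.copy(TLIST)
    for idx, val in enumerate(PLIST):
        TMP[TLIST == val] = idx                 # -> [[0, 1], [1, 2], [0, 2]]
    return TMP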
def eval_CURKER(GM,GP,configuration):
"""
Evaluate the current kernel using the in- and out-tunneling rates.
Args:
GM,GP: numpy arrays containing in- and out-tunneling rates
in the order of cfg.TLIST.
configuration: integer determining parallel (0) or anti-parallel(1)
configuration
Returns:
the current kernel as a 1-d numpy array.
"""
# We get a view on the transition list and its transpose
TLIST = cfg.TLIST[configuration]
TLIST_T = np.transpose(TLIST)
# ... and extract the list of groundstates (see also eval_DENKER)
PLIST = list(set(TLIST_T[0]).union(TLIST_T[1]))
PLIST.sort()
# this determines the size of the statespace
NP = len(PLIST)
CUR = np.zeros(NP)
# Note that the current kernel can be calculated by summing the diagonal elements
# of the density matrix with opposite sign
# compare eval_DENKER
for idx,num in enumerate(PLIST):
CUR[idx] = np.sum(np.where(TLIST_T[1] == num,GP,0.)) - np.sum(np.where(TLIST_T[0] == num,GM,0.))
return CUR
def current(GP,GM,POP,configuration):
"""
Calculate the current using the rates and populations.
Args:
GP, GM: np-arrays containing in- and out-tunneling rates.
POP: np-array for the populations
configuration: integer determining parallel (0) or anti-parallel(1)
configuration
Returns:
current as a float.
"""
# We calculate the current kernel
CURKER = eval_CURKER(GM,GP,configuration)
# and vector-multiply it with the population vector
I = -np.sum(cfg.conf["ELE"]*np.dot( CURKER, POP))
return I
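# Hedged aside (illustrative only, added for exposition): the return value
# above reduces to I = -prefactor * dot(CURKER, POP). The numbers below are
# hypothetical and `prefactor` merely stands in for cfg.conf["ELE"].
def _demo_current_formula():
    CURKER = np.array([1.0, -1.0])
    POP = np.array([0.25, 0.75])
    prefactor = 1.0
    return -prefactor * np.dot(CURKER, POP)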
def eval_tmr(fname,plotname,pop):
"""
Calculates the TMR by evaluating conductance through
parallel and anti-parallel polarized contacts.
Args:
fname: the h5 file to load the rates from.
plotname: A name for the pdf output to produce.
pop: If True, we plot the populations, too.
"""
# We prepare the current and conductance vectors for different
# values of gate and bias voltage
C_p = np.zeros((cfg.conf['NV'],cfg.conf['NVb']))
C_ap = np.zeros((cfg.conf['NV'],cfg.conf['NVb']))
G_p = np.zeros((cfg.conf['NV'],cfg.conf['NVb']-1))
G_ap = np.zeros((cfg.conf['NV'],cfg.conf['NVb']-1))
dVb = cfg.conf['Vb_range'][1]- cfg.conf['Vb_range'][0]
# the population vectors, for all values of gate and bias
POP_p = np.zeros((cfg.conf['NVb'],cfg.conf['NV'],cfg.N_GS[0]))
POP_ap = np.zeros((cfg.conf['NVb'],cfg.conf['NV'],cfg.N_GS[1]))
# We iterate over two bias values first
for nV,Vb in enumerate(cfg.conf["Vb_range"]):
# now the rates are loaded from the h5 file
# note that the label of the specific rate arrays are fixed
with h5py.File(fname) as file:
GP0_p = np.array(file['par_P0_V{}'.format(Vb)])
GP0_ap = np.array(file['apa_P0_V{}'.format(Vb)])
GP1_p = np.array(file['par_P1_V{}'.format(Vb)])
GP1_ap = np.array(file['apa_P1_V{}'.format(Vb)])
GM0_p = np.array(file['par_M0_V{}'.format(Vb)])
GM0_ap = np.array(file['apa_M0_V{}'.format(Vb)])
GM1_p = np.array(file['par_M1_V{}'.format(Vb)])
GM1_ap = np.array(file['apa_M1_V{}'.format(Vb)])
# for the density kernel, we sum all rates over both leads
DENKER_p = np.array([eval_DENKER(GM0_p[n]+GM1_p[n],GP0_p[n]+GP1_p[n],0)for n in range(cfg.conf["NV"])])
DENKER_ap = np.array([eval_DENKER(GM0_ap[n]+GM1_ap[n],GP0_ap[n]+GP1_ap[n],1)for n in range(cfg.conf["NV"])])
# the populations are calculated from the density kernel by an asymptotic
# approximation scheme
POP_ap[nV] = np.array([pop.asymptotic_ssp(DENKER_ap[n]) for n in range(cfg.conf["NV"])])
POP_p[nV] = np.array([pop.asymptotic_ssp(DENKER_p[n]) for n in range(cfg.conf["NV"])])
# note that the current is calculated from the rates in one of the leads only
C_p[:,nV] = np.array([ current(GP0_p[n],GM0_p[n],POP_p[nV,n],0) for n in np.arange(cfg.conf["NV"]) ])
C_ap[:,nV] = np.array([ current(GP0_ap[n],GM0_ap[n],POP_ap[nV,n],1) for n in np.arange(cfg.conf["NV"]) ])
# the numerical derivative gives the conductance
G_p = np.diff(C_p).flatten()/dVb
G_ap = np.diff(C_ap).flatten()/dVb
# we save the conductance traces to a h5 file specified as a global variable
# hdffile in the path datpath
# It is possible that the dataset already exists. In this case, we issue a warning.
try:
save_hdf5("{}{}".format(datpath,hdffile),G_p,G_ap)
except RuntimeError:
print "Unable to save to {}, maybe there is already a dataset with similar parameters...".format(hdffile)
# the tmr and conductance graphs are plotted to a pdf file for review.
plot_tmr_pdf(G_p,G_ap,plotname)
# if the pop flag is set, we also plot the population for one bias value
if pop:
plot_population([POP_p[0],POP_ap[0]],os.path.splitext(plotname)[0]+"_POP.pdf")
def plot_tmr_pdf(C_p,C_ap,fname):
"""
A helper routine to plot the conductance and TMR to a pdf file in the datpath.
Args:
C_p, C_ap: the parallel and anti-parallel conductance.
fname: the filename to plot to
"""
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# we plot the conductance graph on top, p and ap with different colors
Axes1 = plt.subplot(2,1,1)
Axes1.set_xticklabels([])
plt.ylabel("Conductance (e^2/h)")
plt.title("Conductance at zero bias")
# parallel is plotted in red, and anti-parallel as blue dashed line
plt.plot( cfg.conf["V_g"],C_p,'r',cfg.conf["V_g"], C_ap, 'b--')
# on the second panel, the TMR is plotted
Axes2 = plt.subplot(2,1,2)
plt.xlabel("gate voltage (V)")
plt.ylabel("TMR")
plt.title("TMR")
plt.ylim((-0.3,1.5))
TMR = np.zeros(cfg.conf["NV"])
for i in range(cfg.conf["NV"]):
try:
TMR[i] = C_p[i]/C_ap[i]-1.
        except (ZeroDivisionError, FloatingPointError):
            print "Zero division, returning zero."
TMR[i] = 0.
plt.plot( cfg.conf["V_g"], TMR)
plt.savefig(fname, bbox_inches='tight')
def plot_population(POP, fname):
"""
Calculates and plots selected populations of the quantum dot
with gate voltage. The edge states N=-1 and 5 are neglected.
Args:
POP: a list with the two population vectors
for parallel and anti-parallel configurations
fname: the filename to plot to
"""
import matplotlib.pyplot as plt
NV = cfg.conf["NV"]
print "Calculating populations..."
# We plot the populations for both configurations
# the parallel populations on top
# the anti-parallel on bottom
Ax = [plt.subplot(2,1,1),plt.subplot(2,1,2)]
cm = plt.get_cmap('gist_rainbow')
PopPlots = [1,4,8,12,17,18]
NP = len(PopPlots)
for gamidx in range(2):
TLIST = cfg.TLIST[gamidx]
TLIST_T = np.transpose(TLIST)
PLIST = list(set(TLIST_T[0]).union(TLIST_T[1]))
PLIST.sort()
# we cycle through the linecolors to distinguish the different
# groundstates
Ax[gamidx].set_color_cycle([cm(1.*k/NP) for k in range(NP)])
for i in PopPlots:
color = cm(1.*i/NP)
LABEL = "P_{}".format(cfg.int_to_state(PLIST[i]))
Ax[gamidx].plot( cfg.conf["V_g"], POP[gamidx][:,i],label=LABEL)
lines =Ax[gamidx].get_lines()
labels = [l.get_label() for l in lines]
leg = plt.figlegend(lines,labels,loc='upper right')
plt.savefig(fname)
plt.show()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
"""
Interface routine to call the tmr module.
Example:
./tmr.py <jobname>
In principle, there were routines to plot rates, populations,
conductances etc. but apart from the population plotting,
none of the use cases was needed anymore.
"""
POP = False
# The default config file is called cnt.conf
cfile = "cnt.conf"
rlist = [0.,]
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "hc:P", ["help","config=","pop"])
except getopt.error, msg:
raise Usage(msg)
for o,a in opts:
if o in ('-h','--help'):
usage()
exit()
elif o in ('-c','--config'):
cfile = a
elif o in ('-P','--pop'):
POP = True
else:
raise Usage('Invalid argument.')
# we parse the config and initialize it
cfg.parse_conf("dat/{0}/{1}".format(args[0],cfile))
cfg.init()
h5file = "{}{}/{}".format(datpath,args[0],ratefile)
pdffile = "{}{}.pdf".format(datpath,args[0])
print "Try to open {}".format(h5file)
eval_tmr(h5file,pdffile,POP)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
def usage():
print "This is a tool to process rate files.\n\
\n\
usage: tmr.py [-hP] [--pop] jobname\n\
\n\
--pop or -P: Plot the populations.\n\
\n\
jobname: The name of the directory for the rate files.\n\
\n\
The script searches for files dat/jobname/running_calc.h5\n\
and dat/jobname/cnt.conf"
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
dariox2/CADL | session-5/libs/stylenet.py | 4 | 11350 | """Style Net w/ tests for Video Style Net.
Video Style Net requires OpenCV 3.0.0+ w/ Contrib for Python to be installed.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from . import vgg16
from . import gif
def make_4d(img):
"""Create a 4-dimensional N x H x W x C image.
Parameters
----------
img : np.ndarray
Given image as H x W x C or H x W.
Returns
-------
img : np.ndarray
N x H x W x C image.
Raises
------
ValueError
Unexpected number of dimensions.
"""
if img.ndim == 2:
img = np.expand_dims(img[np.newaxis], 3)
elif img.ndim == 3:
img = img[np.newaxis]
elif img.ndim == 4:
return img
else:
raise ValueError('Incorrect dimensions for image!')
return img
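# Hedged usage sketch (illustrative only, not part of the original module):
# grayscale, RGB and already-4d inputs are all promoted or passed through
# to the N x H x W x C layout expected below.
def _demo_make_4d():
    gray = np.zeros((32, 32))
    rgb = np.zeros((32, 32, 3))
    assert make_4d(gray).shape == (1, 32, 32, 1)
    assert make_4d(rgb).shape == (1, 32, 32, 3)
    assert make_4d(make_4d(rgb)).shape == (1, 32, 32, 3)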
def stylize(content_img, style_img, base_img=None, saveto=None, gif_step=5,
n_iterations=100, style_weight=1.0, content_weight=1.0):
"""Stylization w/ the given content and style images.
Follows the approach in Leon Gatys et al.
Parameters
----------
content_img : np.ndarray
Image to use for finding the content features.
style_img : TYPE
Image to use for finding the style features.
base_img : None, optional
Image to use for the base content. Can be noise or an existing image.
If None, the content image will be used.
saveto : str, optional
Name of GIF image to write to, e.g. "stylization.gif"
gif_step : int, optional
Modulo of iterations to save the current stylization.
n_iterations : int, optional
Number of iterations to run for.
style_weight : float, optional
Weighting on the style features.
content_weight : float, optional
Weighting on the content features.
Returns
-------
stylization : np.ndarray
Final iteration of the stylization.
"""
# Preprocess both content and style images
content_img = make_4d(content_img)
style_img = make_4d(style_img)
if base_img is None:
base_img = content_img
else:
base_img = make_4d(base_img)
# Get Content and Style features
net = vgg16.get_vgg_model()
g = tf.Graph()
with tf.Session(graph=g) as sess:
tf.import_graph_def(net['graph_def'], name='vgg')
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
content_layer = 'vgg/conv3_2/conv3_2:0'
content_features = g.get_tensor_by_name(
content_layer).eval(feed_dict={
x: content_img,
'vgg/dropout_1/random_uniform:0': [[1.0]],
'vgg/dropout/random_uniform:0': [[1.0]]})
style_layers = ['vgg/conv1_1/conv1_1:0',
'vgg/conv2_1/conv2_1:0',
'vgg/conv3_1/conv3_1:0',
'vgg/conv4_1/conv4_1:0',
'vgg/conv5_1/conv5_1:0']
style_activations = []
for style_i in style_layers:
style_activation_i = g.get_tensor_by_name(style_i).eval(
feed_dict={
x: style_img,
'vgg/dropout_1/random_uniform:0': [[1.0]],
'vgg/dropout/random_uniform:0': [[1.0]]})
style_activations.append(style_activation_i)
style_features = []
for style_activation_i in style_activations:
s_i = np.reshape(style_activation_i,
[-1, style_activation_i.shape[-1]])
gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
style_features.append(gram_matrix.astype(np.float32))
# Optimize both
g = tf.Graph()
with tf.Session(graph=g) as sess:
net_input = tf.Variable(base_img)
tf.import_graph_def(
net['graph_def'],
name='vgg',
input_map={'images:0': net_input})
content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer) -
content_features) /
content_features.size)
style_loss = np.float32(0.0)
for style_layer_i, style_gram_i in zip(style_layers, style_features):
layer_i = g.get_tensor_by_name(style_layer_i)
layer_shape = layer_i.get_shape().as_list()
layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
gram_matrix = tf.matmul(
tf.transpose(layer_flat), layer_flat) / layer_size
style_loss = tf.add(
style_loss, tf.nn.l2_loss(
(gram_matrix - style_gram_i) /
np.float32(style_gram_i.size)))
loss = content_weight * content_loss + style_weight * style_loss
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
sess.run(tf.initialize_all_variables())
imgs = []
for it_i in range(n_iterations):
_, this_loss, synth = sess.run(
[optimizer, loss, net_input],
feed_dict={
'vgg/dropout_1/random_uniform:0': np.ones(
g.get_tensor_by_name(
'vgg/dropout_1/random_uniform:0'
).get_shape().as_list()),
'vgg/dropout/random_uniform:0': np.ones(
g.get_tensor_by_name(
'vgg/dropout/random_uniform:0'
).get_shape().as_list())
})
print("iteration %d, loss: %f, range: (%f - %f)" %
(it_i, this_loss, np.min(synth), np.max(synth)), end='\r')
if it_i % gif_step == 0:
imgs.append(np.clip(synth[0], 0, 1))
if saveto is not None:
gif.build_gif(imgs, saveto=saveto)
return np.clip(synth[0], 0, 1)
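# Hedged aside (illustrative only): the style features computed in stylize()
# are size-normalised Gram matrices of the flattened layer activations.
# A standalone NumPy sketch of that computation:
def _demo_gram_matrix(activation):
    # activation: any array whose last axis indexes feature channels
    s = np.reshape(activation, [-1, activation.shape[-1]])
    return np.matmul(s.T, s) / s.size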
def warp_img(img, dx, dy):
"""Apply the motion vectors to the given image.
Parameters
----------
img : np.ndarray
Input image to apply motion to.
dx : np.ndarray
H x W matrix defining the magnitude of the X vector
dy : np.ndarray
H x W matrix defining the magnitude of the Y vector
Returns
-------
img : np.ndarray
Image with pixels warped according to dx, dy.
"""
warped = img.copy()
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
dx_i = int(np.round(dx[row_i, col_i]))
dy_i = int(np.round(dy[row_i, col_i]))
sample_dx = np.clip(dx_i + col_i, 0, img.shape[0] - 1)
sample_dy = np.clip(dy_i + row_i, 0, img.shape[1] - 1)
warped[sample_dy, sample_dx, :] = img[row_i, col_i, :]
return warped
def test_video(style_img='arles.jpg', videodir='kurosawa'):
r"""Test for artistic stylization using video.
This requires the python installation of OpenCV for the Deep Flow algorithm.
If cv2 is not found, then there will be reduced "temporal coherence".
Unfortunately, installing opencv for python3 is not the easiest thing to do.
OSX users can install this using:
$ brew install opencv --with-python3 --with-contrib
then will have to symlink the libraries. I think you can do this w/:
$ brew link --force opencv3
But the problems start to arise depending on which python you have
installed, and it is always a mess w/ homebrew. Sorry!
Your best bet is installing from source. Something along
these lines should get you there:
$ cd ~
$ git clone https://github.com/Itseez/opencv.git
$ cd opencv
$ git checkout 3.1.0
$ cd ~
$ git clone https://github.com/Itseez/opencv_contrib.git
$ cd opencv_contrib
$ git checkout 3.1.0
$ cd ~/opencv
$ mkdir build
$ cd build
$ cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D INSTALL_C_EXAMPLES=OFF \
-D INSTALL_PYTHON_EXAMPLES=OFF \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib/modules \
-D BUILD_EXAMPLES=OFF ..
Parameters
----------
style_img : str, optional
Location to style image
videodir : str, optional
Location to directory containing images of each frame to stylize.
Returns
-------
imgs : list of np.ndarray
Stylized images for each frame.
"""
has_cv2 = True
try:
import cv2
has_cv2 = True
optflow = cv2.optflow.createOptFlow_DeepFlow()
except ImportError:
has_cv2 = False
style_img = plt.imread(style_img)
content_files = [os.path.join(videodir, f)
for f in os.listdir(videodir) if f.endswith('.png')]
content_img = plt.imread(content_files[0])
from scipy.misc import imresize
style_img = imresize(style_img, (448, 448)).astype(np.float32) / 255.0
content_img = imresize(content_img, (448, 448)).astype(np.float32) / 255.0
if has_cv2:
prev_lum = cv2.cvtColor(content_img, cv2.COLOR_RGB2HSV)[:, :, 2]
else:
prev_lum = (content_img[..., 0] * 0.3 +
content_img[..., 1] * 0.59 +
content_img[..., 2] * 0.11)
imgs = []
stylized = stylize(content_img, style_img, content_weight=5.0,
style_weight=0.5, n_iterations=50)
plt.imsave(fname=content_files[0] + 'stylized.png', arr=stylized)
imgs.append(stylized)
for f in content_files[1:]:
content_img = plt.imread(f)
content_img = imresize(content_img, (448, 448)).astype(np.float32) / 255.0
if has_cv2:
lum = cv2.cvtColor(content_img, cv2.COLOR_RGB2HSV)[:, :, 2]
flow = optflow.calc(prev_lum, lum, None)
warped = warp_img(stylized, flow[..., 0], flow[..., 1])
stylized = stylize(content_img, style_img, content_weight=5.0,
style_weight=0.5, base_img=warped, n_iterations=50)
else:
lum = (content_img[..., 0] * 0.3 +
content_img[..., 1] * 0.59 +
content_img[..., 2] * 0.11)
stylized = stylize(content_img, style_img, content_weight=5.0,
style_weight=0.5, base_img=None, n_iterations=50)
imgs.append(stylized)
plt.imsave(fname=f + 'stylized.png', arr=stylized)
prev_lum = lum
return imgs
def test():
"""Test for artistic stylization."""
from six.moves import urllib
f = ('https://upload.wikimedia.org/wikipedia/commons/thumb/5/54/' +
'Claude_Monet%2C_Impression%2C_soleil_levant.jpg/617px-Claude_Monet' +
'%2C_Impression%2C_soleil_levant.jpg?download')
filepath, _ = urllib.request.urlretrieve(f, f.split('/')[-1], None)
style = plt.imread(filepath)
f = ('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ae/' +
'El_jard%C3%ADn_de_las_Delicias%2C_de_El_Bosco.jpg/640px-El_jard' +
'%C3%ADn_de_las_Delicias%2C_de_El_Bosco.jpg')
filepath, _ = urllib.request.urlretrieve(f, f.split('/')[-1], None)
content = plt.imread(filepath)
stylize(content, style)
if __name__ == '__main__':
test_video()
| apache-2.0 |
jayhetee/dask | dask/array/numpy_compat.py | 9 | 1606 | import numpy as np
try:
isclose = np.isclose
except AttributeError:
def isclose(*args, **kwargs):
raise RuntimeError("You need numpy version 1.7 or greater to use "
"isclose.")
try:
full = np.full
except AttributeError:
def full(shape, fill_value, dtype=None, order=None):
"""Our implementation of numpy.full because your numpy is old."""
if order is not None:
raise NotImplementedError("`order` kwarg is not supported upgrade "
"to Numpy 1.8 or greater for support.")
return np.multiply(fill_value, np.ones(shape, dtype=dtype),
dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Divide with dtype doesn't work on Python 3
def divide(x1, x2, out=None, dtype=None):
"""Implementation of numpy.divide that works with dtype kwarg.
Temporary compatibility fix for a bug in numpy's version. See
https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
x = np.divide(x1, x2, out)
if dtype is not None:
x = x.astype(dtype)
return x
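# Hedged usage sketch (illustrative only): on either code path above,
# ``divide`` should honour an explicit output dtype.
def _demo_divide():
    out = divide(np.arange(4), 2, dtype=np.float64)
    assert out.dtype == np.float64
    return out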
| bsd-3-clause |
google/brain-tokyo-workshop | WANNRelease/prettyNEAT/vis/lplot.py | 2 | 2027 | """
Laconic plot functions to replace some of matplotlib's verbosity
"""
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
# -- File I/O ------------------------------------------------------------ -- #
def lsave(data,fileName):
np.savetxt(fileName, data, delimiter=',',fmt='%1.2e')
def lload(fileName):
return np.loadtxt(fileName, delimiter=',')
# -- Basic Plotting ------------------------------------------------------ -- #
def lplot(*args,label=[],axis=False):
"""Plots an vector, a set of vectors, with or without an x scale
"""
fig, ax = getAxis(axis)
if len(args) == 1: # No xscale
x = np.arange(np.shape(args)[1])
y = args[0]
if len(args) == 2: # xscale given
x = args[0]
y = args[1]
if np.ndim(y) == 2:
for i in range(np.shape(y)[1]):
ax.plot(x,y[:,i],'-')
if len(label) > 0:
ax.legend((label))
else:
ax.plot(x,y,'o-')
if axis is False:
return fig, ax
else:
return ax
def ldist(x, axis=False):
"""Plots histogram with estimated distribution
"""
fig, ax = getAxis(axis)
if isinstance(x, str):
vals = lload(x)
else:
vals = x
sns.distplot(vals.flatten(),ax=ax,bins=10)
#sns.distplot(vals.flatten(),ax=ax,hist_kws={"histtype": "step", "linewidth": 3, "alpha": 1, "color": "g"})
return ax
def lquart(x,y,label=[],axis=False):
"""Plots quartiles, x is a vector, y is a matrix with same length as x
"""
if axis is not False:
ax = axis
fig = ax.figure.canvas
else:
fig, ax = plt.subplots()
q = np.percentile(y,[25,50,75],axis=1)
plt.plot(x,q[1,:],label=label) # median
plt.plot(x,q[0,:],'k:',alpha=0.5)
plt.plot(x,q[2,:],'k:',alpha=0.5)
plt.fill_between(x,q[0,:],q[2,:],alpha=0.25)
return ax
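# Hedged usage sketch (illustrative only, not part of the original module):
# plot the quartile band of noisy samples around a sine wave (synthetic data).
def _demo_lquart():
  x = np.linspace(0, 2 * np.pi, 50)
  y = np.sin(x)[:, None] + 0.1 * np.random.randn(50, 20)
  ax = lquart(x, y, label='sin(x)')
  plt.show()
  return ax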
def getAxis(axis):
if axis is not False:
ax = axis
fig = ax.figure.canvas
else:
fig, ax = plt.subplots()
return fig,ax
# -- --------------- -- --------------------------------------------#
| apache-2.0 |
samstern/Greengraph | Greengraph/tests/test_maps.py | 1 | 2937 | from ..greengraph import Greengraph
from ..map import Map
import geopy
from nose.tools import assert_equal, assert_almost_equal
import numpy.testing as np_test
from mock import Mock, patch
import requests
from matplotlib import image
import yaml
import os
import numpy as np
#@patch.object(Greengraph, 'location_sequence')
#@patch.object(Map, 'count_green')
def test_map_constructor():
    mock_image = open(os.path.join(os.path.dirname(__file__), 'fixtures', 'NY_2.png'), 'rb')
with patch.object(requests,'get',return_value=Mock(content=mock_image.read())) as mock_get:
with patch.object(image,'imread') as mock_img:
#default
            Map(40.7127837, -74.0059413)  # New York
            # London: Map(51.5073509, -0.1277583)
mock_get.assert_called_with(
"http://maps.googleapis.com/maps/api/staticmap?",
params={
'sensor':'false',
'zoom':10,
'size':'400x400',
'center':'40.7127837,-74.0059413',
'style':'feature:all|element:labels|visibility:off',
'maptype': 'satellite'
}
)
#changing parameters
            Map(41.8781136, -87.6297982,satellite=False,zoom=15,size=(500,350),sensor=True) # Chicago
mock_get.assert_called_with(
"http://maps.googleapis.com/maps/api/staticmap?",
params={
'sensor':'true',
'zoom':15,
'size':'500x350',
'center':'41.8781136,-87.6297982',
'style':'feature:all|element:labels|visibility:off',
#'maptype': 'satellite'
}
)
def test_green():
mock_image= open(os.path.join(os.path.dirname(__file__),'fixtures','NY_2.png'),'rb')
fixture_green = np.load(os.path.join(os.path.dirname(__file__),'fixtures','ny_green.npy'))
threshold = 1.1
with patch('requests.get', return_value=Mock(content=mock_image.read())) as mock_get:
        testMap = Map(41.8781136, -87.6297982) # Chicago
assert_equal(fixture_green.shape,testMap.green(threshold).shape)
assert (testMap.green(threshold) == fixture_green).all() == True
assert (testMap.green(1.5) == fixture_green).all() == False
def test_count_green():
mock_image= open(os.path.join(os.path.dirname(__file__),'fixtures','NY_2.png'),'rb')
fixture_green = np.load(os.path.join(os.path.dirname(__file__),'fixtures','ny_green.npy'))
threshold = 1.1
with patch('requests.get', return_value=Mock(content=mock_image.read())) as mock_get:
        testMap = Map(41.8781136, -87.6297982) # Chicago
count_from_fixture=np.sum(fixture_green)
assert (testMap.count_green() == count_from_fixture)
assert (testMap.count_green(1.5) != count_from_fixture) | mit |
uqyge/combustionML | FPV_ANN_pureResNet/data_reader_2.py | 1 | 5981 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, StandardScaler
class data_scaler(object):
def __init__(self):
self.norm = None
self.norm_1 = None
self.std = None
self.case = None
self.scale = 1
self.bias = 1e-20
# self.bias = 1
self.switcher = {
'min_std': 'min_std',
'std2': 'std2',
'std_min': 'std_min',
'min': 'min',
'no': 'no',
'log': 'log',
'log_min': 'log_min',
'log2': 'log2',
'tan': 'tan'
}
def fit_transform(self, input_data, case):
self.case = case
if self.switcher.get(self.case) == 'min_std':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.norm.fit_transform(input_data)
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'std2':
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'min':
self.norm = MinMaxScaler()
out = self.norm.fit_transform(input_data)
if self.switcher.get(self.case) == 'no':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = input_data
if self.switcher.get(self.case) == 'log':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.std = StandardScaler()
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.norm = MinMaxScaler()
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'log2':
self.norm = MinMaxScaler()
self.norm_1 = MinMaxScaler()
out = self.norm.fit_transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.norm_1.fit_transform(out)
if self.switcher.get(self.case) == 'tan':
self.norm = MaxAbsScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.norm.transform(input_data)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.std.transform(input_data)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'log2':
out = self.norm.transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.norm_1.transform(out)
if self.switcher.get(self.case) == 'tan':
out = self.std.transform(input_data)
out = self.norm.transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def inverse_transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.std.inverse_transform(input_data)
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.inverse_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.norm.inverse_transform(input_data)
out = self.std.inverse_transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.inverse_transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log':
out = self.std.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log_min':
out = self.norm.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log2':
out = self.norm_1.inverse_transform(input_data)
out = np.exp(out) - self.bias
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'tan':
out = (2 * np.pi + self.bias) * np.arctan(input_data)
out = self.norm.inverse_transform(out)
out = self.std.inverse_transform(out)
return out
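# Hedged usage sketch (illustrative only, not part of the original module):
# round-trip a random array through one scaling case and recover the input.
def _demo_data_scaler():
    demo = np.random.rand(10, 3)
    scaler = data_scaler()
    scaled = scaler.fit_transform(demo, 'std2')
    recovered = scaler.inverse_transform(scaled)
    assert np.allclose(demo, recovered)
    return recovered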
def read_h5_data(fileName, input_features, labels):
df = pd.read_hdf(fileName)
df = df[df['f'] < 0.45]
input_df = df[input_features]
in_scaler = data_scaler()
input_np = in_scaler.fit_transform(input_df.values, 'no')
label_df = df[labels].clip(0)
# if 'PVs' in labels:
# label_df['PVs']=np.log(label_df['PVs']+1)
out_scaler = data_scaler()
label_np = out_scaler.fit_transform(label_df.values, 'std2')
return input_np, label_np, df, in_scaler, out_scaler | mit |
andyh616/mne-python | mne/tests/test_epochs.py | 1 | 71695 | # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry() # really clean warning stack
def test_reject():
"""Test epochs rejection
"""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad_epochs(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad_epochs,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad_epochs(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
def test_decim():
"""Test epochs decimation
"""
# First with EpochsArray
n_epochs, n_channels, n_times = 5, 10, 20
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
sfreq = 1000.
sfreq_new = sfreq / decim
data = np.random.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.decimate(decim, copy=True).get_data()
data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs, data_epochs_2)
# Now let's do it with some real data
raw, events, picks = _get_data()
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=False)
assert_raises(ValueError, epochs.decimate, -1)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload)
epochs.preload_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
# events with non integers
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
epochs_no_bl.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_no_bl_read.apply_baseline(baseline)
assert_true(epochs_no_bl_read.baseline == baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(), epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax, picks=picks,
on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.preload_data()
epochs.crop(0, 0, copy=False)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_equal(len(epochs_read.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
assert_true(epochs_resampled is epochs)
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
    assert_equal(caught, 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names == (ind if isinstance(ind, list) else [ind]))
        # test that non-indexed data were present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection
"""
raw, events, picks = _get_data()
events = events[:10]
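    # pick a sparse subset of MEG/EEG channels plus the ECG/EOG channels
    # (presumably just to keep this nested parameter sweep fast)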
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for preload in (True, False):
for proj in (True, False, 'delayed'):
for decim in (1, 3):
for ii in range(2):
epochs = Epochs(raw, events, event_id, tmin, tmax,
picks=picks, proj=proj, reject=reject,
preload=preload, decim=decim)
if ii == 1:
epochs.preload_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
if proj is True:
evoked.apply_proj()
epochs_data = epochs.get_data().mean(axis=0)[picks_data]
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
assert_allclose(evoked.data, epochs_data,
rtol=1e-5, atol=1e-15)
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
# In the preload case you cannot know the bads if already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info['ch_names'].pop(0)
epochs_meg.info['nchan'] -= 1
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
    epochs_meg2.info['experimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0., baseline=None)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
tmin=-0.2, baseline=(None, 0))
def test_concatenate_epochs():
"""Test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types
"""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
epoch_meg = epoch.pick_types(meg=True, copy=True)
epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
    assert_true(all(ch in epoch_new.ch_names
                    for ch in epoch_eeg.ch_names + epoch_meg.ch_names))
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
run_tests_if_main()
| bsd-3-clause |
yl565/statsmodels | statsmodels/examples/ex_scatter_ellipse.py | 39 | 1367 | '''example for grid of scatter plots with probability ellipses
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import lrange
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.plot_grids import scatter_ellipse
nvars = 6
mmean = np.arange(1.,nvars+1)/nvars * 1.5
rho = 0.5
#dcorr = rho*np.ones((nvars, nvars)) + (1-rho)*np.eye(nvars)
r = np.random.uniform(-0.99, 0.99, size=(nvars, nvars))
##from scipy import stats
##r = stats.rdist.rvs(1, size=(nvars, nvars))
r = (r + r.T) / 2.
assert np.allclose(r, r.T)
mcorr = r
mcorr[lrange(nvars), lrange(nvars)] = 1
#dcorr = np.array([[1, 0.5, 0.1],[0.5, 1, -0.2], [0.1, -0.2, 1]])
mstd = np.arange(1.,nvars+1)/nvars
mcov = mcorr * np.outer(mstd, mstd)
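# note: a random symmetric matrix with ones on the diagonal is not guaranteed to
# be positive definite, hence the eigenvalue check on the implied covariance below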
evals = np.linalg.eigvalsh(mcov)
assert evals.min() > 0  # assert positive definite
nobs = 100
data = np.random.multivariate_normal(mmean, mcov, size=nobs)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
print(dmean)
print(dcov)
dcorr = np.corrcoef(data, rowvar=0)
dcorr[np.triu_indices(nvars)] = 0
print(dcorr)
#default
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95])
#used for checking
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95], add_titles=True, keep_ticks=True)
#check varnames
varnames = ['var%d' % i for i in range(nvars)]
fig = scatter_ellipse(data, level=0.9, varnames=varnames)
plt.show()
| bsd-3-clause |
ijmarshall/cochrane-nlp | quality4.py | 1 | 73371 | from tokenizer import sent_tokenizer, word_tokenizer
import biviewer
import pdb
import re
import progressbar
import collections
import string
from unidecode import unidecode
import codecs
import yaml
from pprint import pprint
import numpy as np
import math
import difflib
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.feature_extraction import DictVectorizer
from sklearn import cross_validation
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.externals import six
from collections import defaultdict
from sklearn.metrics import precision_recall_fscore_support
import random
import operator
from sklearn.cross_validation import KFold
from journalreaders import PdfReader
import cPickle as pickle
from sklearn.metrics import f1_score, make_scorer, fbeta_score, accuracy_score
import nltk
from nltk.corpus import stopwords
REGEX_QUOTE_PRESENT = re.compile("Quote\:")
REGEX_QUOTE = re.compile("\"(.*?)\"") # retrive blocks of text in quotes
REGEX_ELLIPSIS = re.compile("\s*[\[\(]?\s?\.\.+\s?[\]\)]?\s*") # to catch various permetations of "..." and "[...]"
SIMPLE_WORD_TOKENIZER = re.compile("[a-zA-Z]{2,}") # regex of the rule used by sklearn CountVectorizer
CORE_DOMAINS = ["Random sequence generation", "Allocation concealment", "Blinding of participants and personnel",
"Blinding of outcome assessment", "Incomplete outcome data", "Selective reporting"]
# "OTHER" is generated in code, not in the mapping file
# see data/domain_names.txt for various other criteria
# all of these are available via QualityQuoteReader
ALL_DOMAINS = CORE_DOMAINS[:] # will be added to later
RoB_CLASSES = ["YES", "NO", "UNKNOWN"]
STOP_WORDS = set(stopwords.words('english'))
# @TODO move me
domain_str = lambda d: d.lower().replace(" ", "_")
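# e.g. domain_str("Blinding of outcome assessment") -> "blinding_of_outcome_assessment"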
def show_most_informative_features(vectorizer, clf, n=1000):
###
# note that in the multi-class case, clf.coef_ will
# have k weight vectors, which I believe are one per
# each class (i.e., each is a classifier discriminating
# one class versus the rest).
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
if n == 0:
n = len(c_f)/2
top = zip(c_f[:n], c_f[:-(n+1):-1])
print
print "%d most informative features:" % (n, )
out_str = []
for (c1, f1), (c2, f2) in top:
out_str.append("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
feature_str = "\n".join(out_str)
return feature_str
def show_most_informative_features_ynu(vectorizer, clf, n=10):
###
# note that in the multi-class case, clf.coef_ will
# have k weight vectors, which I believe are one per
# each class (i.e., each is a classifier discriminating
# one class versus the rest).
combinations = ["NO vs (YES + UNKNOWN)", "UNKNOWN vs (YES + NO)", "YES vs (NO + UNKNOWN)"]
out_str = []
for i, combination in enumerate(combinations):
out_str.append(combination)
out_str.append("*" * 20)
c_f = sorted(zip(clf.coef_[i], vectorizer.get_feature_names()))
if n == 0:
n = len(c_f)/2
top = zip(c_f[:n], c_f[:-(n+1):-1])
for (c1, f1), (c2, f2) in top:
out_str.append("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
feature_str = "\n".join(out_str)
return feature_str
def load_domain_map(filename="data/domain_names.txt"):
with codecs.open(filename, 'rb', 'utf-8') as f:
raw_data = yaml.load(f)
mapping = {}
for key, value in raw_data.iteritems():
for synonym in value:
mapping[synonym] = key
return mapping
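# illustrative (assumed) layout of data/domain_names.txt -- a YAML map from each
# core domain to the synonyms Cochrane authors use for it, e.g.:
#   Random sequence generation:
#       - "Adequate sequence generation?"
#       - "Random sequence generation (selection bias)"
# load_domain_map() inverts this into a {synonym: core domain} lookup.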
class QualityQuoteReader2():
"""
iterates through Cochrane Risk of Bias information
v2 maintains unique ids for all source studies + does not filter by whether a quote is present
returns list of quotes where they are available, else none
"""
def __init__(self, sent=False, test_mode=False):
self.BiviewerView = collections.namedtuple('BiViewer_View', ['uid', 'cochrane', 'studypdf'])
self.pdfviewer = biviewer.PDFBiViewer()
self.domain_map = load_domain_map()
if test_mode:
self.test_mode_break_point = 500
else:
self.test_mode_break_point = None
def __iter__(self):
"""
run through PDF/Cochrane data
preprocesses PDF text
and maps domain title to one of the core Risk of Bias domains if possible
"""
progress_bar_limit = len(self.pdfviewer) if self.test_mode_break_point is None else self.test_mode_break_point
p = progressbar.ProgressBar(progress_bar_limit, timer=True)
for uid, study in enumerate(self.pdfviewer):
if self.test_mode_break_point and (uid >= self.test_mode_break_point):
break
p.tap()
quality_data = study.cochrane["QUALITY"]
for domain in quality_data:
domain["QUOTES"] = self.preprocess_cochrane(domain["DESCRIPTION"])
try:
domain["DOMAIN"] = self.domain_map[domain["DOMAIN"]] # map domain titles to our core categories
except:
domain["DOMAIN"] = "OTHER"
yield self.BiviewerView(uid=uid, cochrane={"QUALITY": quality_data}, studypdf=self.preprocess_pdf(study.studypdf))
def __len__(self):
return len(self.pdfviewer) if self.test_mode_break_point is None else self.test_mode_break_point
def preprocess_pdf(self, pdftext):
pdftext = unidecode(pdftext)
pdftext = re.sub("\n", " ", pdftext) # preprocessing rule 1
return pdftext
def preprocess_cochrane(self, rawtext):
# regex clean up of cochrane strings
processedtext = unidecode(rawtext)
processedtext = re.sub(" +", " ", processedtext)
# extract all parts in quotes
quotes = REGEX_QUOTE.findall(processedtext)
# then split at any ellipses
quote_parts = []
for quote in quotes:
quote_parts.extend(REGEX_ELLIPSIS.split(quote))
return quote_parts
class PDFMatcher():
"""
matches and generates sent tokens from pdf text
"""
def __init__(self, quotes=None, pdftext=None):
# load a sequence matcher; turn autojunk off (since buggy for long strings)
self.sequencematcher = difflib.SequenceMatcher(None, autojunk=False)
if quotes:
self.quotes = self.load_quotes(quotes)
if pdftext:
self.pdftext = self.load_pdftext(pdftext)
def load_quotes(self, quotes):
self.quotes = quotes
def load_pdftext(self, pdftext):
self.pdftext = pdftext
self.lenpdf = len(pdftext)
self.sequencematcher.set_seq2(self.pdftext)
self.sent_indices = sent_tokenizer.span_tokenize(self.pdftext)
def _overlap(self, t1, t2):
"""
finds out whether two tuples overlap
"""
t1s, t1e = t1
t2s, t2e = t2
# true if either start of t1 is inside t2, or start of t2 is inside t1
return (t2s <= t1s <= t2e) or (t1s <= t2s <= t1e)
def generate_X(self):
X = []
# go through sentence indices
# make X (list of sentences)
for (start_i, end_i) in self.sent_indices:
X.append(self.pdftext[start_i: end_i])
return X
def generate_y(self, min_char_match=20):
"""
        returns y: list of 1/-1 labels, one per sentence (1 where the sentence
        overlaps a matched quote), or [] if no quote matched well enough
"""
        good_match = False # this will be set to True if sufficient matching characters in
# at least one of the parts of the quotes
match_indices = []
# go through quotes, match using difflib
# and keep any matches which are long enough so likely true matches
for quote in self.quotes:
self.sequencematcher.set_seq1(quote)
best_match = self.sequencematcher.find_longest_match(0, len(quote), 0, self.lenpdf)
# only interested in good quality matches
if best_match.size > min_char_match:
good_match = True
match_indices.append((best_match.b, best_match.b + best_match.size)) # add (start_i, end_i) tuples (of PDF indices)
y = []
if not good_match:
# if quality criteria not met, leave here
# (i.e. return empty lists [], [])
return y
# otherwise continue and generate feature and answer vectors
# get indices of sentences (rather than split)
sent_indices = sent_tokenizer.span_tokenize(self.pdftext)
# go through sentence indices
# make X (list of sentences)
# and calculate y, if there is *any* overlap with matched quoted text then
# y = True
for (start_i, end_i) in sent_indices:
# if any overlaps with quotes, then y = True, else False
if any((self._overlap((start_i, end_i), match_tuple) for match_tuple in match_indices)):
y.append(1)
else:
y.append(-1)
return y
class SentenceModel():
"""
predicts whether sentences contain risk of bias information
- uses data from Cochrane quotes only
"""
def __init__(self, test_mode=False):
self.quotereader = QualityQuoteReader2(test_mode=test_mode) # this now runs through all studies
def generate_data(self, uid_filter=None):
"""
tokenizes and processes the raw text from pdfs and cochrane
        saves in self.X_list (list of sentence strings) and self.y_list (dict of per-domain label lists)
"""
test_domains = CORE_DOMAINS # may change later to access various "other" domains
# one feature matrix X across all domains
        self.X_list = [] # list of sentence strings, in study order
# but multiple y vectors; one for each test domain
self.y_list = {domain: [] for domain in test_domains}
self.y_judgements = {domain: [] for domain in test_domains} # used in subclasses to make hybrid models
self.X_uids = []
self.y_uids = {domain: [] for domain in test_domains}
for uid, cochrane_data, pdf_data in self.quotereader:
if uid_filter is not None and uid not in uid_filter:
continue
matcher = PDFMatcher()
matcher.load_pdftext(pdf_data)
X_study = matcher.generate_X()
self.X_list.extend(X_study)
self.X_uids.extend([uid] * len(X_study))
domains_done_already = [] # for this particular study
# (we're ignoring multiple quotes per domain at the moment and getting the first...)
for domain in cochrane_data["QUALITY"]:
if domain["DOMAIN"] not in test_domains or domain["DOMAIN"] in domains_done_already:
continue # skip if a domain is repeated in a study (though note that this is likely due to different RoB per *outcome* which is ignored here)
if domain["QUOTES"]:
matcher.load_quotes(domain["QUOTES"])
y_study = matcher.generate_y()
self.y_list[domain["DOMAIN"]].extend(y_study)
self.y_uids[domain["DOMAIN"]].extend([uid] * len(y_study))
self.y_judgements[domain["DOMAIN"]].extend([domain["RATING"]] * len(y_study))
domains_done_already.append(domain["DOMAIN"])
self.y = {domain: np.array(self.y_list[domain]) for domain in test_domains}
self.X_uids = np.array(self.X_uids)
self.y_uids = {domain: np.array(self.y_uids[domain]) for domain in test_domains}
self.y_judgements = {domain: np.array(self.y_judgements[domain]) for domain in test_domains}
# self.vectorize()
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
# self.X = self.vectorizer.fit_transform(self.X_list, max_features=50000)
self.X = self.vectorizer.fit_transform(self.X_list)
def load_text(self, filename):
"""
        loads the original text of all PDFs, for debugging and inspecting predicted text from the corpus
NB this is a large file
"""
with open(filename, 'rb') as f:
self.X_list = pickle.load(f)
def __len__(self):
"""
returns the total number of studies (not features)
"""
return len(self.quotereader)
def len_domain(self, domain):
return len(np.unique(self.y_uids[domain]))
def domain_X_filter(self, domain):
"""
returns X_filter for a domain
"""
y_study_uids = np.unique(self.y_uids[domain])
X_filter = np.nonzero([(X_uid in y_study_uids) for X_uid in self.X_uids])[0]
return X_filter
def domain_uids(self, domain):
unique_study_ids = np.unique(self.y_uids[domain])
return unique_study_ids
def X_y_uid_filtered(self, uids, domain):
X_all = self.X_domain_all(domain=domain)
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
def get_all_domains(self):
return self.y.keys()
def X_get_sentence(self, select_sent_id, domain):
y_study_ids = np.unique(self.y[domain].study_ids)
X_filter = np.nonzero([X_study_id in y_study_ids for X_study_id in self.X.study_ids])[0]
return self.X_list.data[X_filter[select_sent_id]]
def X_domain_all(self, domain):
"""
retrieve X data for a domain
"""
X_filter = self.domain_X_filter(domain)
return self.X[X_filter]
def y_domain_all(self, domain):
return self.y[domain]
# def X_y_filtered(self, filter_ids, domain):
# X_all = self.X_domain_all(domain=domain)
# y_all = self.y_domain_all(domain=domain)
# # np.unique always returns ordered ids
# unique_study_ids = np.unique(self.y_uids[domain])
# mapped_ids = [unique_study_ids[filter_id] for filter_id in filter_ids]
# filter_ids = np.nonzero([(y_study_id in mapped_ids) for y_study_id in self.y_uids[domain]])[0]
# X_filtered = X_all[filter_ids]
# y_filtered = y_all[filter_ids]
# return X_filtered, y_filtered
class DocumentLevelModel(SentenceModel):
"""
for predicting the risk of bias
as "HIGH", "LOW", or "UNKNOWN" for a document
using a binary bag-of-words as features for each document
"""
def generate_data(self, uid_filter=None, binarize=False):
"""
tokenizes and processes the raw text from pdfs and cochrane
        saves in self.X_list (list of full PDF texts) and self.y_list (dict of per-domain rating lists)
"""
test_domains = CORE_DOMAINS # may change later to access various "other" domains
# one feature matrix X across all domains
        self.X_list = [] # list of full PDF texts, one per study
# but multiple y vectors; one for each test domain
self.y_list = {domain: [] for domain in test_domains}
self.X_uids = []
self.y_uids = {domain: [] for domain in test_domains}
for uid, cochrane_data, pdf_data in self.quotereader:
if uid_filter is not None and uid not in uid_filter:
continue
X_study = [pdf_data] # this time the X is the whole pdf data
self.X_list.extend(X_study)
self.X_uids.extend([uid] * len(X_study))
domains_done_already = [] # for this particular study
# (we're ignoring multiple quotes per domain at the moment and getting the first...)
for domain in cochrane_data["QUALITY"]:
if domain["DOMAIN"] not in test_domains or domain["DOMAIN"] in domains_done_already:
continue # skip if a domain is repeated in a study (though note that this is likely due to different RoB per *outcome* which is ignored here)
if binarize:
y_study = 1 if domain["RATING"]=="YES" else -1 # binarize
else:
y_study = domain["RATING"]
self.y_list[domain["DOMAIN"]].append(y_study)
self.y_uids[domain["DOMAIN"]].append(uid)
domains_done_already.append(domain["DOMAIN"])
self.y = {domain: np.array(self.y_list[domain]) for domain in test_domains}
self.X_uids = np.array(self.X_uids)
self.y_uids = {domain: np.array(self.y_uids[domain]) for domain in test_domains}
class MultiTaskDocumentModel(DocumentLevelModel):
'''
The idea here is to train a single model across all domains. Basically:
y_ij = sign{(w0 + w_j) * x_i}
for document x_i, where a w_j is learned for each domain and w0 is a shared
weight vector (across domains).
'''
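    # concretely (a sketch of the encoding built in vectorize() below): each
    # labelled (document i, domain j) pair becomes one row whose features are the
    # plain bag-of-words for x_i plus a copy of those features under a "<domain>-"
    # prefix, so a single weight vector [w0, w_1, ..., w_k] scores the row as
    # w0.x_i + w_j.x_i -- shared weights plus domain-specific ones.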
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
self.X_mt_labels = [] # which rows correspond to which doc/interactions?
self.y_mt = []
self.uids_to_row_indices = {}
self.row_indices_to_uids, self.row_indices_to_domains = [], []
# number of rows in the 'multi-task' matrix, which
# will vary depending on how many docs have labels
# for how many domains
        n_rows = 0 # equal to len(self.X_mt_labels)
'''
the vectorizer wants all the documents at once,
so here we are going to build them up. we're only
going to add interaction copies for a given document
for those domains that we have an associated label.
'''
docs = []
# which indices in docs correspond to copies for
# which domains?
domains_to_interaction_doc_indices = defaultdict(list)
for i, doc in enumerate(self.X_list):
# `intercept' document
uid = self.X_uids[i]
# add |CORE_DOMAINS| copies for each instance.
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
if uid in self.domain_uids(domain):
# get the label (note that we match up the
# uid to do this)
uids_to_lbls = dict(zip(self.y_uids[domain],
self.y_domain_all(domain=domain)))
#y_index = self.y_uids(domain).index(uid)
#domain_ys = self.y_domain_all(domain=domain)
#self.y_mt.append(domain_ys[y_index])
self.y_mt.append(uids_to_lbls[uid])
# append interaction copy of doc
docs.append(doc)
self.row_indices_to_uids.append(uid)
self.row_indices_to_domains.append(domain)
self.X_mt_labels.append("%s-%s" % (i, d_str))
domains_to_interaction_doc_indices[d_str].append(n_rows)
n_rows += 1
'''
        now actually add documents and interaction copies to
the vectorizer.
'''
#for i, doc in enumerate(self.X_list):
# `intercept' document
self.vectorizer.builder_add_docs(docs)
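        # for each domain, add a prefixed copy of every document; documents
        # without a label for that domain contribute an empty string, so their
        # prefixed (interaction) features are all zero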
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
interaction_list = []
for i in xrange(len(docs)):
if i in domains_to_interaction_doc_indices[d_str]:
interaction_list.append(docs[i])
else:
interaction_list.append("")
self.vectorizer.builder_add_docs(interaction_list, prefix=d_str+"-")
# BCW -- attempting to upper bound features!
# note that this will keep the <max_features> most common
# features, regardless of whether or not they are 'interaction'
# features
# self.X = self.vectorizer.builder_fit_transform(max_features=50000)
self.X = self.vectorizer.builder_fit_transform(low=2)
def X_y_uid_filtered(self, uids, domain=None):
X_indices, y = [], []
for i in xrange(self.X.shape[0]):
if domain is None and self.row_indices_to_uids[i] in uids:
# if domain is None, return big multi-task design matrix
# -- you'll want to do this, e.g., for training
X_indices.append(i)
y.append(self.y_mt[i])
elif domain == self.row_indices_to_domains[i] and self.row_indices_to_uids[i] in uids:
# otherwise (when domain is not None), return
# instances for only the target domain
# (e.g., for testing)
X_indices.append(i)
y.append(self.y_mt[i])
return self.X[X_indices], y
class MultiTaskHybridDocumentModel(MultiTaskDocumentModel):
'''
same as the MultiTaskDocumentModel, except takes in sentence
level modelling too into the mix
'''
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
self.X_mt_labels = [] # which rows correspond to which doc/interactions?
self.y_mt = []
self.uids_to_row_indices = {}
self.row_indices_to_uids, self.row_indices_to_domains = [], []
# number of rows in the 'multi-task' matrix, which
# will vary depending on how many docs have labels
# for how many domains
n_rows = 0 # (equal to len(self.X_mt_labels))
'''
the vectorizer wants all the documents at once,
so here we are going to build them up. we're only
going to add interaction copies for a given document
for those domains that we have an associated label.
'''
docs = []
high_prob_sents = defaultdict(list)
# which indices in docs correspond to copies for
# which domains?
domains_to_interaction_doc_indices = defaultdict(list)
for i, doc in enumerate(self.X_list):
# `intercept' document
uid = self.X_uids[i]
# add |CORE_DOMAINS| copies for each instance.
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
if uid in self.domain_uids(domain):
# get the label (note that we match up the
# uid to do this)
uids_to_lbls = dict(zip(self.y_uids[domain],
self.y_domain_all(domain=domain)))
#y_index = self.y_uids(domain).index(uid)
#domain_ys = self.y_domain_all(domain=domain)
#self.y_mt.append(domain_ys[y_index])
self.y_mt.append(uids_to_lbls[uid])
# append interaction copy of doc
docs.append(doc)
high_prob_sents[domain].append(self.get_sent_predictions_for_doc(doc, domain))
for high_prob_domain in CORE_DOMAINS:
if high_prob_domain != domain:
high_prob_sents[high_prob_domain].append("")
self.row_indices_to_uids.append(uid)
self.row_indices_to_domains.append(domain)
self.X_mt_labels.append("%s-%s" % (i, d_str))
domains_to_interaction_doc_indices[d_str].append(n_rows)
n_rows += 1
'''
now actually add documents and interaction copies to
the vectorizer.
'''
#for i, doc in enumerate(self.X_list):
# `intercept' document
self.vectorizer.builder_add_docs(docs)
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
interaction_list, sent_interaction_list = [], []
for i in xrange(len(docs)):
if i in domains_to_interaction_doc_indices[d_str]:
interaction_list.append(docs[i])
sent_interaction_list.append(high_prob_sents[domain][i])
else:
interaction_list.append("")
sent_interaction_list.append("")
self.vectorizer.builder_add_docs(interaction_list, prefix=d_str+"-doc-")
self.vectorizer.builder_add_docs(sent_interaction_list, prefix=d_str+"-sent-")
self.X = self.vectorizer.builder_fit_transform(max_features=200000, low=3)
# self.X = self.vectorizer.builder_fit_transform(max_features=50000)
####
# maybe record the feature indices here that are to receive
# different 'amounts' of regularization
####
def get_sent_predictions_for_doc(self, doc, domain):
# tokenize into sentences
sents = sent_tokenizer.tokenize(doc)
# vectorize the sentences
X_sents = self.sent_vectorizer.transform(sents)
# get predicted 1 / -1 for the sentences
pred_class = self.sent_clfs[domain].predict(X_sents)
# get the sentences which are predicted 1
positive_sents = [sent for sent, pred in zip(sents, pred_class) if pred==1]
# make a single string per doc
rob_sents = " ".join(positive_sents)
return rob_sents
def set_sent_model(self, sent_clfs, sent_vectorizer):
"""
set per-domain sentence classifiers (a dict mapping each domain to a
model that labels sentences -1 or 1) together with their shared vectorizer
"""
self.sent_clfs = sent_clfs
self.sent_vectorizer = sent_vectorizer
class HybridDocModel(DocumentLevelModel):
"""
for predicting the risk of bias
as "HIGH", "LOW", or "UNKNOWN" for a document,
using a bag-of-words over the document plus upweighted features from
sentences predicted to carry risk-of-bias information
"""
def vectorize(self, domain=None):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
predictions = self.get_sent_predictions_for_domain(domain)
self.vectorizer.builder_add_docs([self.X_list[i] for i in X_filter])
self.vectorizer.builder_add_docs(predictions, prefix="high-prob-sent-", weighting=10)
self.X = self.vectorizer.builder_fit_transform()
def get_sent_predictions_for_domain(self, domain):
uids = self.domain_uids(domain)
predictions = []
for uid in uids:
# get the index of the study with specified uid
study_index = np.nonzero(self.X_uids==uid)[0][0]
# tokenize into sentences
sents = sent_tokenizer.tokenize(self.X_list[study_index])
# vectorize the sentences
X_sents = self.sent_vectorizer.transform(sents)
# get predicted 1 / -1 for the sentences
pred_class = self.sent_clf.predict(X_sents)
# get the sentences which are predicted 1
positive_sents = [sent for sent, pred in zip(sents, pred_class) if pred==1]
# make a single string per doc
doc = " ".join(positive_sents)
predictions.append(doc)
return predictions
def set_sent_model(self, doc_clf, doc_vectorizer):
"""
set a sentence-level model (and its vectorizer) which labels
sentences as -1 or 1
"""
self.sent_clf = doc_clf
self.sent_vectorizer = doc_vectorizer
def X_y_uid_filtered(self, uids, domain):
X_all = self.X
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
# class HybridDocModel2(HybridDocModel):
# """
# for predicting the risk of bias
# as "HIGH", "LOW", or "UNKNOWN" for a document
# using a binary bag-of-words as features for each document
# """
# def vectorize(self, domain=None):
# if domain is None:
# raise TypeError("this class requires domain specific vectorization")
# self.vectorizer = ModularCountVectorizer()
# self.vectorizer.builder_clear()
# X_filter = self.domain_X_filter(domain)
# predictions = self.get_sent_predictions_for_domain(domain)
# # self.vectorizer.builder_add_docs([self.X_list[i] for i in X_filter])
# self.vectorizer.builder_add_docs(predictions, prefix="high-prob-sent-")
# self.X = self.vectorizer.builder_fit_transform()
class HybridModel(SentenceModel):
"""
predicts whether sentences contain risk of bias information
- uses real RoB judgements
"""
def vectorize(self, domain=None, interaction_classes=["YES", "NO"]):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
sents = [self.X_list[i] for i in X_filter]
self.vectorizer.builder_add_docs(sents)
for interaction_class in interaction_classes:
self.vectorizer.builder_add_interaction_features(
sents, self.y_judgements[domain]==interaction_class, prefix="rob-" + interaction_class + "-")
self.X = self.vectorizer.builder_fit_transform()
def X_y_uid_filtered(self, uids, domain):
X_all = self.X
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
class HybridModelProbablistic(HybridModel):
"""
predicts whether sentences contain risk of bias information
- requires a model to be passed in which can predict RoB judgements from
the full text document
"""
def vectorize(self, domain=None, interaction_classes=["YES", "NO"], use_vectorizer=None):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
if use_vectorizer is None:
self.vectorizer = ModularCountVectorizer()
else:
self.vectorizer = use_vectorizer
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
predictions = self.get_doc_predictions_for_domain(domain)
sents = [self.X_list[i] for i in X_filter]
self.vectorizer.builder_add_docs(sents)
for interaction_class in interaction_classes:
self.vectorizer.builder_add_interaction_features(sents, predictions==interaction_class, prefix="rob-" + interaction_class + "-")
if use_vectorizer is None:
self.X = self.vectorizer.builder_fit_transform()
else:
self.X = self.vectorizer.builder_transform()
def get_doc_predictions_for_domain(self, domain):
uids = self.domain_uids(domain)
predictions = []
for uid in uids:
# get the indices of all sentences in the study with specified uid
X_filter = np.nonzero(self.X_uids==uid)[0]
# make a single string per doc
doc = " ".join([self.X_list[i] for i in X_filter])
# vectorize the docs, then predict using the model
X_doc = self.doc_vectorizer.transform([doc]) # wrap in a list so the whole doc is treated as one sample
prediction = self.doc_clf.predict(X_doc)
# add the same prediction for each sentence
predictions.extend([prediction[0]] * len(X_filter))
return np.array(predictions)
def set_doc_model(self, doc_clf, doc_vectorizer):
"""
set a model (and its vectorizer) which takes in a full text doc and
outputs a doc class "YES", "NO", or "UNKNOWN"
"""
self.doc_clf = doc_clf
self.doc_vectorizer = doc_vectorizer
def _document_frequency(X):
"""Count the number of non-zero values for each feature in csc_matrix X."""
return np.diff(X.indptr)
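# Hedged example (illustration only, not part of the original module): for a
# CSC matrix the indptr array stores the column boundaries, so np.diff gives
# the count of stored (non-zero) entries per feature column.
#
# from scipy.sparse import csc_matrix
# X = csc_matrix(np.array([[1, 0, 2],
# [0, 0, 3]]))
# _document_frequency(X) # -> array([1, 0, 2])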
class ModularCountVectorizer():
"""
Similar to CountVectorizer from sklearn, but allows building up
of feature matrix gradually, and adding prefixes to feature names
(to identify interaction terms)
"""
def __init__(self, *args, **kwargs):
self.data = []
self.vectorizer = DictVectorizer(*args, **kwargs)
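# Minimal usage sketch (assumed, mirroring how the models above call it):
# v = ModularCountVectorizer()
# v.builder_clear()
# v.builder_add_docs(["random allocation was used", "no blinding reported"])
# v.builder_add_docs(["random allocation was used", ""], prefix="RANDOM-")
# X = v.builder_fit_transform(low=None)
# The second call adds prefixed interaction copies aligned row-for-row with
# the first; low=None skips document-frequency pruning for this tiny toy
# corpus (the default low=2 would discard nearly everything here).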
def _transform_X_to_dict(self, X, prefix=None, weighting=1):
"""
makes a list of dicts from a list of documents
1. word tokenizes each document
2. creates {word1: weighting, word2: weighting, ...} dicts
(missing words are implicitly 0 for the DictVectorizer we use)
"""
return [self._dict_from_word_list(
self._word_tokenize(document, prefix=prefix), weighting=weighting) for document in X]
def _word_tokenize(self, text, prefix=None, stopword=True):
"""
simple word tokenizer using the same rule as sklearn:
punctuation is ignored, any token of 2 or more letter characters is a word,
and stop words are dropped unless stopword=False
"""
stop_word_list = STOP_WORDS if stopword else set()
if prefix:
return [prefix + word.lower() for word in SIMPLE_WORD_TOKENIZER.findall(text)
if not word.lower() in stop_word_list]
else:
return [word.lower() for word in SIMPLE_WORD_TOKENIZER.findall(text)
if not word.lower() in stop_word_list]
def _dict_from_word_list(self, word_list, weighting=1):
return {word: weighting for word in word_list}
def _dictzip(self, dictlist1, dictlist2):
"""
zips together two lists of dicts of the same length
"""
# checks lists must be the same length
if len(dictlist1) != len(dictlist2):
raise IndexError("Unable to combine featuresets with different number of examples")
output = []
for dict1, dict2 in zip(dictlist1, dictlist2):
output.append(dict(dict1.items() + dict2.items()))
# note that this *overwrites* any duplicate keys with the key/value from dictlist2!!
return output
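# For illustration (assumption, matching the note above): merging
# [{"a": 1}] with [{"a": 2, "b": 1}] via _dictzip yields [{"a": 2, "b": 1}],
# i.e. the value from the second dict list wins for duplicate keys.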
def transform(self, X, prefix=None):
# X is a list of document strings
# word tokenizes each one, then passes to a dict vectorizer
dict_list = self._transform_X_to_dict(X, prefix=prefix)
return self.vectorizer.transform(dict_list)
def fit_transform(self, X, prefix=None, max_features=None, low=None):
# X is a list of document strings
# word tokenizes each one, then passes to a dict vectorizer
dict_list = self._transform_X_to_dict(X, prefix=prefix)
X = self.vectorizer.fit_transform(dict_list)
if max_features is not None or low is not None:
X, removed = self._limit_features(X.tocsc(),
self.vectorizer.vocabulary_, low=low, limit=max_features)
print "pruned %s features!" % len(removed)
X = X.tocsc()
return X
def get_feature_names(self):
return self.vectorizer.get_feature_names()
def builder_clear(self):
self.builder = []
self.builder_len = 0
def builder_add_docs(self, X, prefix = None, weighting=1):
#pdb.set_trace()
if not self.builder:
self.builder_len = len(X)
self.builder = self._transform_X_to_dict(X, prefix=prefix, weighting=weighting)
else:
X_dicts = self._transform_X_to_dict(X, prefix=prefix, weighting=weighting)
self.builder = self._dictzip(self.builder, X_dicts)
def builder_add_interaction_features(self, X, interactions, prefix=None):
if prefix is None:
raise TypeError('Prefix is required when adding interaction features')
doc_list = [(sent if interacting else "") for sent, interacting in zip(X, interactions)]
self.builder_add_docs(doc_list, prefix)
def builder_fit_transform(self, max_features=None, low=2):
X = self.vectorizer.fit_transform(self.builder)
if max_features is not None or low is not None:
X, removed = self._limit_features(X.tocsc(),
self.vectorizer.vocabulary_, low=low, limit=max_features)
print "pruned %s features!" % len(removed)
X = X.tocsc()
return X #self.vectorizer.fit_transform(self.builder)
def builder_transform(self):
return self.vectorizer.transform(self.builder)
def _limit_features(self, cscmatrix, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return cscmatrix, set()
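# Worked example (illustrative assumption): with document frequencies
# dfs = [1, 5, 3], low=2 masks out the first feature, and limit=1 then
# keeps only the feature with df 5; the dropped vocabulary entries are
# returned as removed_terms so callers can report how much was pruned.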
# Calculate a mask based on document frequencies
dfs = _document_frequency(cscmatrix)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
# backward compatibility requires us to keep lower indices in ties!
# (and hence to reverse the sort by negating dfs)
mask_inds = (-dfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
return cscmatrix[:, kept_indices], removed_terms
def sentence_prediction_test(class_weight={1: 5, -1:1}, model=SentenceModel(test_mode=True)):
print
print
print
print "Sentence level prediction"
print "=" * 40
print
s = model
print "Model name:\t" + s.__class__.__name__
print s.__doc__
print "class_weight=%s" % (str(class_weight),)
s.generate_data()
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
# # tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01, class_weight={1: 5, -1: 1})
X_all = s.X_domain_all(test_domain)
y_all = s.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def stupid_sentence_prediction_test(model=SentenceModel(test_mode=False)):
print
print
print
print "Sentence level prediction (always-predict-positive baseline)"
print "=" * 40
print
s = model
print "Model name:\t" + s.__class__.__name__
print s.__doc__
# print "class_weight=%s" % (str(class_weight),)
s.generate_data()
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
print "making scorer"
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
y_preds = np.array([1] * len(y_test))
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
fold_metric = np.append(fold_metric, accuracy_score(y_test, y_preds))
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f, accuracy %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2], fold_metric[3])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f, accuracy %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2], summary_metrics[3])
def binary_doc_prediction_test(model=DocumentLevelModel(test_mode=False)):
print
print
print
print "Binary doc prediction"
print "=" * 40
print
s = model
s.generate_data(binarize=True)
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
# # tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
# print "making scorer"
# ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring="f1")
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01, class_weight={1: 5, -1: 1})
X_all = s.X_domain_all(test_domain)
y_all = s.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def multitask_document_prediction_test(model=MultiTaskDocumentModel(test_mode=False),
test_domain=CORE_DOMAINS[0]):
print "multitask!"
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
# d.X_uids contains all the UIds.
# d.y_uids contains a dictionary mapping domains to the UIds for
# which we have labels (in said domain)
#pdb.set_trace()
all_uids = d.X_uids
d.vectorize()
####
# the major change here is we don't loop through the domains!
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"),
tuned_parameters, scoring='f1')
kf = KFold(len(all_uids), n_folds=5, shuffle=False) ### TODO 250 is totally random
metrics = defaultdict(list)
for fold_i, (train, test) in enumerate(kf):
print "Training on fold", fold_i,
# note that we do *not* pass in a domain here, because
# we use *all* domain training data
X_train, y_train = d.X_y_uid_filtered(all_uids[train])
print "done!"
clf.fit(X_train, y_train)
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def dummy_document_prediction():
print "dummy!"
d = DocumentLevelModel(test_mode=False)
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
d.vectorize()
all_uids = d.X_uids
kf = KFold(len(all_uids), n_folds=5, shuffle=False) ### TODO 250 is totally random
metrics = defaultdict(list)
for fold_i, (train, test) in enumerate(kf):
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = np.array(([1] * len(y_test))) # everything gets marked low risk of bias
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def multitask_hybrid_document_prediction_test(model=MultiTaskHybridDocumentModel(test_mode=True)):
print "multitask! and hybrid! :)"
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
# d.X_uids contains all the UIds.
# d.y_uids contains a dictionary mapping domains to the UIds for
# which we have labels (in said domain)
#pdb.set_trace()
all_uids = d.X_uids
# d.vectorize()
####
# the major change here is we don't loop through the domains!
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"),
tuned_parameters, scoring='f1')
kf = KFold(len(all_uids), n_folds=5, shuffle=False)
metrics = defaultdict(list)
print "...generating sentence data...",
s = SentenceModel(test_mode=False)
s.generate_data()
s.vectorize()
print "done!"
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
for fold_i, (train, test) in enumerate(kf):
sent_clfs = defaultdict(list)
for domain in CORE_DOMAINS:
sents_X, sents_y = s.X_domain_all(domain=domain), s.y_domain_all(domain=domain)
sent_clfs[domain] = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"),
sent_tuned_parameters, scoring='recall')
sent_clfs[domain].fit(sents_X, sents_y) # fit each domain's sentence model before d.vectorize() uses it
print "Training on fold", fold_i,
d.set_sent_model(sent_clfs, s.vectorizer)
d.vectorize()
# note that we do *not* pass in a domain here, because
# we use *all* domain training data
X_train, y_train = d.X_y_uid_filtered(all_uids[train])
clf.fit(X_train, y_train)
print "done!"
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def document_prediction_test(model=DocumentLevelModel(test_mode=False)):
print "Document level prediction"
print "=" * 40
print
d = model
d.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
d.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
# f1_prefer_nos = make_scorer(f1_score, pos_label="NO")
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"), tuned_parameters, scoring='f1')
# clf = SGDClassifier(loss="hinge", penalty="L2")
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds, labels=RoB_CLASSES))[:3]
print ('fold %d\t' % (fold_i)) + '\t'.join(RoB_CLASSES)
# for metric_type, scores in zip(["prec.", "recall", "f1"], fold_metric):
# print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
# print
# print clf.best_params_
#### START CONFUSION
real_no_indices = (y_test=="NO")
print "The actual NOs were predicted as..."
print collections.Counter(y_preds[real_no_indices])
#### END CONFUSION
metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
mean_scores = np.mean(metrics, axis=0)
print "=" * 40
print 'means \t' + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], mean_scores):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01)
X_all = d.X_domain_all(test_domain)
y_all = d.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features_ynu(d.vectorizer, clf)
def simple_hybrid_prediction_test(model=HybridModel(test_mode=True)):
print "Hybrid prediction"
print "=" * 40
print
s = model
s.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
s.vectorize(test_domain)
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# def simple_hybrid_prediction_test(model=HybridModel(test_mode=True)):
# print "Hybrid prediction"
# print "=" * 40
# print
# s = model
# s.generate_data() # some variations use the quote data internally
# # for sentence prediction (for additional features)
# for test_domain in CORE_DOMAINS:
# s.vectorize(test_domain)
# print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
# domain_uids = s.domain_uids(test_domain)
# no_studies = len(domain_uids)
# kf = KFold(no_studies, n_folds=5, shuffle=False)
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
# metrics = []
# for fold_i, (train, test) in enumerate(kf):
# X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
# X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
# clf.fit(X_train, y_train)
# y_preds = clf.predict(X_test)
# fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
# metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# metrics.append(fold_metric) # get the scores for positive instances
# # summary score
# summary_metrics = np.mean(metrics, axis=0)
# print "=" * 40
# print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def true_hybrid_prediction_test(model, test_mode=False):
print "True Hybrid prediction"
print "=" * 40
print
s = model
s.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
s_cheat = HybridModel(test_mode=False)
s_cheat.generate_data()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
print "training doc level model with test data, please wait..."
d = DocumentLevelModel(test_mode=False)
d.generate_data(uid_filter=domain_uids[train])
d.vectorize()
doc_X, doc_y = d.X_domain_all(domain=test_domain), d.y_domain_all(domain=test_domain)
doc_tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
doc_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), doc_tuned_parameters, scoring='f1')
doc_clf.fit(doc_X, doc_y)
s.set_doc_model(doc_clf, d.vectorizer)
s_cheat.vectorize(test_domain)
s.vectorize(test_domain, use_vectorizer=s_cheat.vectorizer)
X_train, y_train = s_cheat.X_y_uid_filtered(domain_uids[train], test_domain)
# train on the *true* labels
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def hybrid_doc_prediction_test(model=HybridDocModel(test_mode=True)):
print "Hybrid doc level prediction"
print "=" * 40
print
d = model
d.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 5)}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
for fold_i, (train, test) in enumerate(kf):
s = SentenceModel(test_mode=False)
s.generate_data(uid_filter=domain_uids[train])
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds, labels=RoB_CLASSES))[:3]
print ('fold %d\t' % (fold_i)) + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], fold_metric):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
mean_scores = np.mean(metrics, axis=0)
print "=" * 40
print 'means \t' + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], mean_scores):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
def binary_hybrid_doc_prediction_test(model=HybridDocModel(test_mode=True)):
print "Binary hybrid doc level prediction"
print "=" * 40
print
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
for fold_i, (train, test) in enumerate(kf):
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids[train])
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids)
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.1, class_weight={1: 1, -1: 1})
X_all, y_all = d.X_y_uid_filtered(domain_uids, test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def binary_hybrid_doc_prediction_test2(model=HybridDocModel(test_mode=True)):
print "Binary hybrid doc level prediction version 2 (maybe quicker!!)"
print "=" * 40
print
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids)
s.vectorize()
for fold_i, (train, test) in enumerate(kf):
sents_X, sents_y = s.X_y_uid_filtered(domain_uids[test], test_domain)
# sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
# sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}]
sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2", class_weight={1:5, -1:1}), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
# print show_most_informative_features(s.vectorizer, clf.best_estimator_)
print show_most_informative_features(s.vectorizer, clf)
# y_preds = clf.predict(X_test)
# fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
# metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# metrics.append(fold_metric) # get the scores for positive instances
# summary_metrics = np.mean(metrics, axis=0)
# print "=" * 40
# print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# # then train all for most informative features
# sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
# sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
# sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
# sent_clf.fit(sents_X, sents_y)
# d.set_sent_model(sent_clf, s.vectorizer)
# d.vectorize(test_domain)
# clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.5, class_weight={1: 1, -1: 1})
# X_all, y_all = d.X_y_uid_filtered(domain_uids, test_domain)
# clf.fit(X_all, y_all)
def main():
# dummy_document_prediction()
stupid_sentence_prediction_test()
# true_hybrid_prediction_test(model=HybridModelProbablistic(test_mode=False))
# sentence_prediction_test(model=SentenceModel(test_mode=False))
# simple_hybrid_prediction_test(model=HybridModel(test_mode=False))
# binary_doc_prediction_test()
#print "Try weighting sentences better"
#binary_hybrid_doc_prediction_test2()
# binary_hybrid_doc_prediction_test()
# hybrid_doc_prediction_test(model=HybridDocModel2(test_mode=False))
# document_prediction_test(model=DocumentLevelModel(test_mode=False))
# multitask_document_prediction_test(model=MultiTaskDocumentModel(test_mode=False))
# multitask_hybrid_document_prediction_test(model=MultiTaskHybridDocumentModel(test_mode=False))
if __name__ == '__main__':
main()
| gpl-3.0 |
harshaneelhg/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
bestwpw/BDA_py_demos | demos_ch5/demo5_2.py | 19 | 3326 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 2
Hierarchical model for SAT-example data (BDA3, p. 102)
"""
from __future__ import division
import numpy as np
from scipy.stats import norm
import scipy.io # For importing a matlab file
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# SAT-example data (BDA3 p. 120)
# y is the estimated treatment effect
# s is the standard error of effect estimate
y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
s = np.array([15, 10, 16, 11, 9, 11, 10, 18])
M = len(y)
# load the pre-computed results for the hierarchical model
# replace this with your own code in Ex 5.1*
hres_path = '../utilities_and_data/demo5_2.mat'
hres = scipy.io.loadmat(hres_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat('demo5_2.mat')
[('pxm', (8, 500), 'double'),
('t', (1, 1000), 'double'),
('tp', (1, 1000), 'double'),
('tsd', (8, 1000), 'double'),
('tm', (8, 1000), 'double')]
'''
pxm = hres['pxm']
t = hres['t'][0]
tp = hres['tp'][0]
tsd = hres['tsd']
tm = hres['tm']
# plot the separate, pooled and hierarchical models
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
x = np.linspace(-40, 60, 500)
# separate
lines = axes[0].plot(x, norm.pdf(x[:,None], y[1:], s[1:]), linewidth=1)
line, = axes[0].plot(x, norm.pdf(x, y[0], s[0]), 'r')
axes[0].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[0].set_yticks(())
axes[0].set_title('separate model')
# pooled
axes[1].plot(
x,
norm.pdf(
x,
np.sum(y/s**2)/np.sum(1/s**2),
np.sqrt(1/np.sum(1/s**2))
),
label='All schools'
)
axes[1].legend(loc='upper left')
axes[1].set_yticks(())
axes[1].set_title('pooled model')
# hierarchical
lines = axes[2].plot(x, pxm[1:].T, linewidth=1)
line, = axes[2].plot(x, pxm[0], 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_yticks(())
axes[2].set_title('hierarchical model')
axes[2].set_xlabel('Treatment effect')
# plot various marginal and conditional posterior summaries
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
axes[0].plot(t, tp)
axes[0].set_yticks(())
axes[0].set_title(r'marginal posterior density $p(\tau|y)$')
axes[0].set_ylabel(r'$p(\tau|y)$', fontsize=20)
axes[0].set_xlim([0,35])
lines = axes[1].plot(t, tm[1:].T, linewidth=1)
line, = axes[1].plot(t, tm[0].T, 'r')
axes[1].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[1].set_title(r'conditional posterior means of effects '
r'$\operatorname{E}(\theta_j|\tau,y)$')
axes[1].set_ylabel(r'$\operatorname{E}(\theta_j|\tau,y)$', fontsize=20)
lines = axes[2].plot(t, tsd[1:].T, linewidth=1)
line, = axes[2].plot(t, tsd[0].T, 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_title(r'standard deviations of effects '
r'$\operatorname{sd}(\theta_j|\tau,y)$')
axes[2].set_ylabel(r'$\operatorname{sd}(\theta_j|\tau,y)$', fontsize=20)
axes[2].set_xlabel(r'$\tau$', fontsize=20)
plt.show()
| gpl-3.0 |
otmaneJai/Zipline | zipline/sources/data_frame_source.py | 26 | 5253 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Data source that yields from a pandas DataFrame.
:Axis layout:
* columns : sids
* index : datetime
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the DataFrame
assert isinstance(data.columns, pd.Int64Index)
# TODO is ffilling correct/necessary?
# Forward fill prices
self.data = data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.index[0])
self.end = kwargs.get('end', self.data.index[-1])
self.sids = self.data.columns
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(price) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
'price': price,
# Just chose something large
# if no volume available.
'volume': 1e9,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
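# Hypothetical usage sketch (names and values are assumptions, not taken
# from zipline's test suite): build a minimal price frame indexed by UTC
# timestamps with integer sids as columns, wrap it in a DataFrameSource,
# and iterate the raw events it yields.
#
# idx = pd.date_range('2015-01-05', periods=3, tz='UTC')
# frame = pd.DataFrame({0: [10.0, 10.5, 10.25]}, index=idx)
# source = DataFrameSource(frame)
# events = list(source.raw_data) # one dict per (dt, sid) with price/volume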
class DataPanelSource(DataSource):
"""
Data source that yields from a pandas Panel.
:Axis layout:
* items : sids
* major_axis : datetime
* minor_axis : price, volume, ...
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the Panel
assert isinstance(data.items, pd.Int64Index)
# TODO is ffilling correct/necessary?
# forward fill with volumes of 0
self.data = data.fillna(value={'volume': 0})
self.data = self.data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.major_axis[0])
self.end = kwargs.get('end', self.data.major_axis[-1])
self.sids = self.data.items
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(series['price']) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
i19870503/i19870503 | Python/eggnog2go_anno.py | 1 | 2591 | import os
import re
import pandas as pd
import string
import itertools
import numpy as np
import sys
import argparse
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create GO annotation and enrichment file')
parser.add_argument('-i',type=str,dest='infile',required=True,help="Input file")
parser.add_argument('-o',type=str,dest='out',required=True,help="Ouput file")
parser.add_argument('-db',type=str,dest='db',required=True,help="GO Database file")
args = parser.parse_args()
print (args)
def sort_uniq(sequence):
return (x[0] for x in itertools.groupby(sorted(sequence)))
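# small illustration (assumed example): list(sort_uniq(['b', 'a', 'b']))
# returns ['a', 'b'] -- sorted unique items produced lazily via groupby.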
path = "/home/zluna/Work/GO"
fout = open(args.out+"_anno.xls", 'w')
print("Gene_id", "GO_annotation", sep = '\t', file = fout)
go_db = pd.read_table(os.path.join(path, args.db), header = None)
eggout = pd.read_table(os.path.join(path, args.infile), header = None)
#pd.DataFrame.head(eggout)
#eggout.head(100)
dict = OrderedDict()
first_flag = 1
a = list(go_db[0])
for i in range(len(eggout)):
gene_id = eggout[0][i]
go_id = eggout[5][i]
if pd.isnull(eggout[5][i]):
go_id = ''
#print(gene_id, kegg_id, type(kegg_id), sep ='\t')
go_id = go_id.split(',')
if len(go_id) == 0:
continue
go_term = '; '.join(list(go_db[go_db[2].isin(go_id)][0]))
#print(gene_id, go_id, go_term, sep ='\t')
go_sum = []
sel_go_table = go_db[go_db[2].isin(go_id)]
for j in range(len(sel_go_table)):
go_sum.append(''.join(( list(sel_go_table[2])[j], "~", list(sel_go_table[0])[j])))
print(gene_id, str(go_sum).strip('[]').replace(']','').replace("'","").replace(", ","; "), sep = '\t', file = fout)
a = list(go_db[2])
### Use dictionary
for k in range(len(a)):
if str(go_sum).find(a[k]) != -1 :
if a[k] not in dict.keys():
### The value must be a list: if 'gene_id' were stored directly as the value, 'append' could not be used to add further gene ids to an existing key.
dict[a[k]] = []
dict[a[k]].append(gene_id)
else:
dict[a[k]].append(gene_id)
#dict[a[j]] = [dict[a[j]], gene_id]
fout.close()
fout2 = open(args.out+"_enrich.xls", 'w')
print('GOID', 'Term', 'Genes', 'Gene_count', sep = '\t', file = fout2)
for key,values in dict.items():
print(key, list(go_db[go_db[2] == key][0]), str(values).strip('[]').replace(']','').replace("'",""), len(values), sep ='\t', file = fout2)
fout2.close()
| gpl-2.0 |
CCI-Tools/cate-core | cate/ops/index.py | 1 | 8641 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName, ValidationError
from cate.util.monitor import Monitor
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate nino34 index, which is defined as a five month running mean of
anomalies of monthly means of SST data in Nino3.4 region:: lon_min=-170
lat_min=-5 lon_max=-120 lat_max=5.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable (geophysial quantity) to use for index
calculation.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset according to the given
threshold. An anomaly larger than the positive value of the threshold
indicates El Nino and an anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ENSO N3.4 Index'
return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
         region: str = 'N34',
custom_region: PolygonLike.TYPE = None,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    Calculate the ENSO index, which is defined as a five-month running mean of
anomalies of monthly means of SST data in the given region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param region: Region for index calculation, the default is Nino3.4
:param custom_region: If 'custom' is chosen as the 'region', this parameter
has to be provided to set the desired region.
    :param threshold: If given, a boolean El Nino/La Nina timeseries will be
        calculated and added to the output dataset according to the given
        threshold: an anomaly larger than the positive threshold value
        indicates El Nino, while an anomaly smaller than the negative
        threshold value indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
regions = {'N1+2': '-90, -10, -80, 0',
'N3': '-150, -5, -90, 5',
               'N34': '-170, -5, -120, 5',
'N4': '160, -5, -150, 5',
'custom': custom_region}
converted_region = PolygonLike.convert(regions[region])
if not converted_region:
raise ValidationError('No region has been provided to ENSO index calculation')
name = 'ENSO ' + region + ' Index'
if 'custom' == region:
name = 'ENSO Index over ' + PolygonLike.format(converted_region)
return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    Calculate the ONI index, which is defined as a three-month running mean of
anomalies of monthly means of SST data in the Nino3.4 region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
    :param threshold: If given, a boolean El Nino/La Nina timeseries will be
        calculated and added to the output dataset according to the given
        threshold: an anomaly larger than the positive threshold value
        indicates El Nino, while an anomaly smaller than the negative
        threshold value indicates La Nina.
:param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries
"""
n34 = '-170, -5, -120, 5'
name = 'ONI Index'
return _generic_index_calculation(ds, var, n34, 3, file, name, threshold, monitor)
def _generic_index_calculation(ds: xr.Dataset,
var: VarName.TYPE,
region: PolygonLike.TYPE,
window: int,
file: str,
name: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    A generic index calculation, where the index is defined as the moving
    average (with the given window size) of the anomaly, relative to the given
    reference, of the given region of the given variable of the given dataset.
:param ds: Dataset from which to calculate the index
:param var: Variable from which to calculate index
:param region: Spatial subset from which to calculate the index
:param window: Window size for the moving average
:param file: Path to the reference file
:param threshold: Absolute threshold that indicates an ENSO event
:param name: Name of the index
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries
"""
var = VarName.convert(var)
region = PolygonLike.convert(region)
with monitor.starting("Calculate the index", total_work=2):
ds = select_var(ds, var)
ds_subset = subset_spatial(ds, region)
anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
with monitor.child(1).observing("Calculate mean"):
ts = anom.mean(dim=['lat', 'lon'])
df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
retval = df.rolling(window=window, center=True).mean().dropna()
if threshold is None:
return retval
retval['El Nino'] = pd.Series((retval[name] > threshold),
index=retval.index)
retval['La Nina'] = pd.Series((retval[name] < -threshold),
index=retval.index)
return retval
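# Illustrative sketch (not part of the original module): the function below
# reproduces, on synthetic data, the final steps of
# _generic_index_calculation(): a centered rolling mean of an area-averaged
# anomaly series followed by thresholding into boolean El Nino / La Nina
# flags. All names and values here are made up for demonstration only.
def _example_rolling_index(threshold: float = 0.5, window: int = 5) -> pd.DataFrame:
    import numpy as np
    times = pd.date_range('2000-01-01', periods=24, freq='MS')
    anomaly = np.sin(np.linspace(0, 4 * np.pi, times.size))
    df = pd.DataFrame(data=anomaly, columns=['Example Index'], index=times)
    # Centered moving average, dropping the edges that lack a full window
    index = df.rolling(window=window, center=True).mean().dropna()
    # Boolean event flags, mirroring the threshold handling above
    index['El Nino'] = index['Example Index'] > threshold
    index['La Nina'] = index['Example Index'] < -threshold
    return index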
| mit |
huobaowangxi/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same
    # for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/burstlib.py | 1 | 133746 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstlib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
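# Illustrative example (not part of the original module): the two branches of
# get_alex_fraction() on made-up numbers. A non-wrapping range covers
# (stop - start) / alex_period of the period, while a wrapping range covers
# (alex_period + stop - start) / alex_period.
def _example_get_alex_fraction():
    # Non-wrapping selection: (1600 - 100) / 4000 = 0.375
    assert get_alex_fraction((100, 1600), 4000) == 0.375
    # Wrapping selection: (4000 + 500 - 3500) / 4000 = 0.25
    assert get_alex_fraction((3500, 500), 4000) == 0.25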
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
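# Illustrative example (not part of the original module): with a = 0.1 only
# bursts larger than 90% of the maximum burst size in each channel contribute
# to the per-channel mean returned by top_tail().
def _example_top_tail():
    nx = [np.array([1, 2, 9.5, 10]), np.array([5, 50])]
    # ch 0: bursts > 9.0 are [9.5, 10] -> mean 9.75; ch 1: bursts > 45 -> [50]
    assert list(top_tail(nx, a=0.1)) == [9.75, 50.0]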
##
# Per-burst quatitites from ph-data arrays (timestamps, lifetime, etc..)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
    Returns the compacted timestamps as a new array; the input is not modified.
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
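# Illustrative example (not part of the original module): a numeric example of
# the "gap removal" performed by _ph_times_compact(). With an alternation
# period of 100 clock units, of which 50 fall outside the photon selection,
# a timestamp in alternation period k is shifted back by k * 50.
def _example_ph_times_compact():
    ph = np.array([10, 20, 110, 130, 250])
    compact = _ph_times_compact(ph, alex_period=100, excitation_width=50)
    assert list(compact) == [10, 20, 60, 80, 150]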
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
Arguments
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
        func_kw (dict): additional keyword arguments passed to `func` beyond photon-data.
**kwargs: additional arguments passed to :func:`iter_bursts_ph`.
Return
Array one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
# Append the last bursts (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
This function calls iteratively :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 has already the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
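# Illustrative example (not part of the original module): ES_histog() bins
# made-up (E, S) burst values on the default -0.6..1.6 grid (bin_step=0.05);
# every burst falls in exactly one 2D bin, so the histogram sums to the
# number of bursts.
def _example_ES_histog():
    E = np.array([0.1, 0.85, 0.5])
    S = np.array([0.9, 0.5, 0.55])
    H, E_bins, S_bins = ES_histog(E, S, bin_step=0.05)
    assert H.sum() == 3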
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
        # Boolean array
return not mask.any()
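# Illustrative example (not part of the original module): mask_empty() treats
# both zero-length slices and all-False boolean arrays as "empty" photon
# selections.
def _example_mask_empty():
    assert mask_empty(slice(0))
    assert mask_empty(np.zeros(4, dtype=bool))
    assert not mask_empty(np.array([False, True]))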
class DataContainer(dict):
"""
Generic class for storing data.
It's a dictionary in which each key is also an attribute d['nt'] or d.nt.
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
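# Illustrative example (not part of the original module): DataContainer keeps
# dictionary items and attributes in sync, which is how Data exposes fields
# both as d['nd'] and d.nd.
def _example_datacontainer():
    dc = DataContainer(nd=[1, 2, 3])
    assert dc.nd is dc['nd']
    dc.add(na=[4, 5, 6])
    assert dc.na == dc['na'] == [4, 5, 6]
    dc.delete('na', warning=False)
    assert 'na' not in dc and not hasattr(dc, 'na')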
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
When not specified, parameters marked as (list of arrays) contains arrays
with one element per bursts. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
nt (list of arrays): total number photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
    # They do not necessarily exist. For example 'naa' exists only for ALEX
# data. Also none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
# For example use d.nd_ instead if d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
new_d = Data(**self) # this make a shallow copy (like a pointer)
        # Deep copy (not just a reference) of array data
for field in self.burst_fields + self.bg_fields:
            # Making sure the field is present
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.k = new_d[k]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photon in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photon in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
# If not a list is an on-disk array, we need to load it
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
        This allows correcting the photon-rate distortion due to alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
                A new compacted array is returned; the input array is not modified.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
            Array of timestamps in one excitation period with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the AemAex
signal after leakage and direct-excitation corrections.
        If `add_aex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
        where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
        burst size for zero-FRET or D-only bursts (that's why it is called `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The argument `gamma` and
        `beta` are used to correctly scale `naa` so that it becomes
commensurate with the Dex corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
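    # Illustrative note (not part of the original class): with made-up counts
    # nd = 60, na = 40 and gamma = 0.8, convention (1) (donor_ref=True) gives
    # a burst size of 60 + 40 / 0.8 = 110 counts, while convention (2)
    # (donor_ref=False) gives 60 * 0.8 + 40 = 88 counts; the two conventions
    # differ exactly by the factor gamma (110 * 0.8 = 88).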
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (floats): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich) # with eventual duty-cycle correction
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
excitation correction, and `aex_dex_ratio` is the Aex duty-cycle.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
for name in ('naa', 'nda', 'nar'):
if name in d:
d.add(**{name:
[x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
if 'nda' in self:
d.add(nda=[da[n1:n2] for da, n1, n2 in zip(d.nda, N1, N2)])
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
:func:`fretbursts.loader.alex_apply_periods` (with `delete_ph_t=True`)
and before any background estimation or burst search.
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete eventual cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
            if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
        Additional keys `name` and `Name` are added with values
from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
for name in p_dict.keys():
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel for the bursts (can be not 0 only in multi-spot)
alex_naa (bool): if True and self.ALEX, returns burst sizes and
background also for acceptor photons during accept. excitation
            width (bool): whether to return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True returns the bursts duration (in sec.) as last
element.
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This version is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
        If no saved background data is found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
        For more details on the other arguments see :meth:`calc_bg`.
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
        else:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
        # Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
        The rates are computed every `time_s` seconds, making it possible
        to track variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
time_s (float, seconds): compute background each time_s seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
If float is the same threshold for 'all', DD, AD and AA photons
and for all the channels.
If a 3 or 4 element tuple, each value is used for 'all', DD, AD
or AA photons, same value for all the channels.
                If 'auto', the threshold is computed for each stream ('all',
                DD, AD, AA) and for each channel as `F_bg / rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
            F_bg (float): when `tail_min_us` is 'auto', the factor used to
                compute the threshold from the initial background rate
                estimate (i.e. threshold = `F_bg / rate_ml0`).
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
fit_allph (bool): if True (default) the background for the
all-photon is fitted. If False it is computed as the sum of
backgrounds in all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delays MLE
            tail fitting), every 20s, with automatic tail-threshold::
d.calc_bg(bg.exp_fit, time_s=20, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
                 bg_auto_th=bg_auto_th,  # bool, True if using auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
for ph_ch, lim in zip(self.iter_ph_times(ph_sel), self.Lim):
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
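    # Worked example (added for illustration, values are hypothetical): with
    # m=10, c=-1, F=6 and a background rate of 2000 cps the time window is
    # T = (m - 1 - c) / (F * bg_rate) = 10 / 12000 s ~ 0.83 ms, i.e. a rate
    # threshold of about m / T = 12 kcps for that channel.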
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
                If an array, it must contain one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
            search, this method computes donor and acceptor counts, applies
burst corrections (background, leakage, etc...) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
                probability that a detected burst is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poisson so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
start. If not None, it has the precedence over `P` and `F`.
If non-scalar, contains one rate per each multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
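    # Illustrative usage sketch (not part of the original code): assuming
    # `d.calc_bg()` has been run, two common ways to search bursts are
    #
    #     d.burst_search(m=10, F=6)               # min rate = 6 x local BG rate
    #     d.burst_search(m=10, min_rate_cps=50e3) # fixed 50 kcps threshold
    #
    # Both store the bursts in `d.mburst` and, with the default
    # `computefret=True`, also populate `d.nd`, `d.na` and `d.E`.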
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
alex_all (bool): if True and self.ALEX is True, computes also the
donor channel photons during acceptor excitation (`nda`)
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
Saves `nd`, `na`, `nt` (and eventually `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
            ms (float): fuse all bursts separated by less than `ms` milliseconds.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
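    # Illustrative usage sketch (not part of the original code): fusing bursts
    # separated by less than 1 ms (ms=0 fuses only overlapping bursts):
    #
    #     df = d.fuse_bursts(ms=1)   # returns a new Data object
    #
    # The original `d` is untouched; with `process=True` (default) corrections
    # and E/S are recomputed on `df`.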
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
        `filter_fun`. FRETBursts provides several predefined selection
        functions; see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
            filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() points to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
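    # Illustrative usage sketch (not part of the original code): selecting
    # bursts in a FRET range with the predefined `select_bursts.E` function
    # (the same function used by the fitting methods below):
    #
    #     ds = d.select_bursts(select_bursts.E, E1=0.2, E2=0.8)
    #
    # `ds` shares the timestamp arrays with `d` but contains only the
    # selected bursts.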
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
            filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str: if True return, for each channel, a tuple with
a bool array and a string that can be added to the measurement
name to indicate the selection. If False returns only
the bool array. Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A list of boolean arrays (one per channel) that define the burst
selection. If `return_str` is True returns a list of tuples, where
each tuple is a bool array and a string.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
        Masks = [~m[0] if negate else m[0] for m in M]
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
This method select bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() points to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_mask`
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
        # Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel]) # using append would modify also self
return ds
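    # Illustrative sketch (not part of the original code): applying a
    # selection computed on one object to a second object, as mentioned in
    # the docstrings above (assumes both objects share the same burst search
    # so that mask sizes match):
    #
    #     masks = d1.select_bursts_mask(select_bursts.E, E1=0.2, E2=0.8)
    #     d2_sel = d2.select_bursts_mask_apply(masks)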
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
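    # Usage sketch (added for illustration): `E` is an array of E values, one
    # per spot (e.g. the fitted values in `d.E_fit`). A possible workflow:
    #
    #     chi_ch = d.calc_chi_ch(d.E_fit)
    #     d.chi_ch = chi_ch   # the setter triggers E (and S) recomputation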
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
        Unlike :meth:`corrections`, this allows recomputing
        corrections that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, ValueError(msg)
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
G *= self.chi_ch
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
Lk *= self.chi_ch
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
        This method allows calling an arbitrary function on the photon
        timestamps of each burst. For example, if `func` is `np.mean` it
        computes the mean time in each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
            func_kw (dict): additional keyword arguments to be passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
Returns:
            A list of arrays (one per channel). Each array size is equal to
            the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
c (float): this parameter is used in the definition of the
                rate estimator which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
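    # Illustrative note (not part of the original code): the saved `max_rate`
    # is background-subtracted, i.e. the peak m-photon rate minus the BG rate
    # of the period each burst belongs to. Example call:
    #
    #     d.calc_max_rate(m=10)   # results stored in d.max_rate (one array per ch)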
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
        This is a high-level function that can be run after burst search.
By default, it will count Donor and Acceptor photons, perform
corrections (background, leakage), and compute gamma-corrected
FRET efficiencies (and stoichiometry if ALEX).
Arguments:
            count_ph (bool): if True, calls :meth:`calc_ph_num` to
                count Donor and Acceptor photons in each burst (default False)
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
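    # Worked example (added for illustration): for a burst with nd=70, na=30
    # and gamma=1, the non-PAX formula above gives
    # E = na / (gamma * nd + na) = 30 / (70 + 30) = 0.30.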
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
            These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
            weights (string or None): specifies the type of weights.
                If not None, `weights` will be passed to
`fret_fit.get_weights()`. `weights` can be not-None only when
using fit functions that accept weights (the ones ending in
`_hist` or `_EM`)
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters this is probably useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
`weights` are multiplied *BEFORE* squaring the distance/error
`dist` can be 'DeltaE' or 'SlopeEuclid'
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert not np.isnan(E_var[i])  # check there is NO NaN
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
| gpl-2.0 |
CoolProp/CoolProp | dev/scripts/viscosity_builder.py | 2 | 3895 | from math import sqrt, exp
from CoolProp.CoolProp import Props
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
from math import log
E_K = {'REFPROP-Ammonia': 386,
'REFPROP-Argon': 143.2
}
SIGMA = {'REFPROP-Ammonia': 0.2957,
'REFPROP-Argon': 0.335
}
E_K['REFPROP-Propane'] = 263.88
SIGMA['REFPROP-Propane'] = 0.49748
E_K['REFPROP-R32'] = 289.65
SIGMA['REFPROP-R32'] = 0.4098
E_K['REFPROP-R245fa'] = 329.72
SIGMA['REFPROP-R245fa'] = 0.5529
def viscosity_dilute(fluid, T, e_k, sigma):
"""
T in [K], e_k in [K], sigma in [nm]
viscosity returned is in [Pa-s]
"""
Tstar = T / e_k
molemass = Props(fluid, 'molemass')
if fluid == 'Propane' or fluid == 'REFPROP-Propane':
a = [0.25104574, -0.47271238, 0, 0.060836515, 0]
        theta_star = exp(a[0] * pow(log(Tstar), 0) + a[1] * pow(log(Tstar), 1) + a[3] * pow(log(Tstar), 3))
        eta_star = 0.021357 * sqrt(molemass * T) / (pow(sigma, 2) * theta_star) / 1e6
return eta_star
# From Neufeld, 1972, Journal of Chemical Physics - checked coefficients
OMEGA_2_2 = 1.16145 * pow(Tstar, -0.14874) + 0.52487 * exp(-0.77320 * Tstar) + 2.16178 * exp(-2.43787 * Tstar)
# Using the leading constant from McLinden, 2000 since the leading term from Huber 2003 gives crazy values
eta_star = 26.692e-3 * sqrt(molemass * T) / (pow(sigma, 2) * OMEGA_2_2) / 1e6
return eta_star
def viscosity_linear(fluid, T, rho, e_k, sigma):
"""
Implements the method of Vogel 1998 (Propane) for the linear part
"""
N_A = 6.02214129e23
molemass = Props(fluid, 'molemass')
Tstar = T / e_k
b = [-19.572881, 219.73999, -1015.3226, 2471.01251, -3375.1717, 2491.6597, -787.26086, 14.085455, -0.34664158]
s = sum([b[i] * pow(Tstar, -0.25 * i) for i in range(7)])
    B_eta_star = s + b[7] * pow(Tstar, -2.5) + b[8] * pow(Tstar, -5.5)  # [no units]
B_eta = N_A * pow(sigma / 1e9, 3) * B_eta_star # [m3/mol]
return viscosity_dilute(fluid, T, e_k, sigma) * B_eta * rho / molemass * 1000
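# Illustrative usage sketch (added comment; the temperature and density values
# are arbitrary assumptions and REFPROP must be reachable through Props):
#     fl = 'REFPROP-R32'
#     eta_0 = viscosity_dilute(fl, 300.0, E_K[fl], SIGMA[fl])         # dilute-gas term [Pa-s]
#     eta_1 = viscosity_linear(fl, 300.0, 30.0, E_K[fl], SIGMA[fl])   # initial-density term [Pa-s]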
from PDSim.misc.datatypes import Collector
RHO = Collector()
TT = Collector()
DELTA = Collector()
TAU = Collector()
VV = Collector()
VV0 = Collector()
VV1 = Collector()
VVH = Collector()
fluid = 'REFPROP-R32'
Tc = Props(fluid, 'Tcrit')
rhoc = Props(fluid, 'rhocrit')
for T in np.linspace(290, Props(fluid, 'Tcrit') - 0.1, 100):
rhoV = Props('D', 'T', T, 'Q', 1, fluid)
rhoL = Props('D', 'T', T, 'Q', 0, fluid)
rhomax = Props('D', 'T', Props(fluid, 'Tmin'), 'Q', 0, fluid)
for rho in list(np.linspace(rhoL, rhomax, 100)): # +list(np.linspace(rhoV,0.0001,100)):
# for rho in list(np.linspace(rhoV,0.0001,100)):
mu_0 = viscosity_dilute(fluid, T, E_K[fluid], SIGMA[fluid])
mu_1 = viscosity_linear(fluid, T, rho, E_K[fluid], SIGMA[fluid])
mu = Props('V', 'T', T, 'D', rho, fluid)
VV << mu
VV0 << mu_0
VV1 << mu_1
VVH << mu - mu_0 - mu_1
TT << T
RHO << rho
DELTA << rho / rhoc
TAU << Tc / T
def f_RHS(E, DELTA_TAU, VV):
k = 0
sum = 0
DELTA = DELTA_TAU[0, :]
TAU = DELTA_TAU[1, :]
for i in range(2, 5):
for j in range(3):
sum += E[k] * DELTA**i / TAU**j
k += 1
# f1,f2,f3,g1,g2 = E[k],E[k+1],E[k+2],E[k+3],E[k+4]
# DELTA0 = g1*(1+g2*np.sqrt(TAU))
# sum += (f1+f2/TAU+f3/TAU/TAU)*(DELTA/(DELTA0-DELTA)-DELTA/DELTA0)
print('%s %%' % np.mean(np.abs(((sum / VV - 1) * 100))))
return sum
log_muH = np.log(VVH.v().T)
x = np.c_[DELTA.v().T, TAU.v().T].T
y = VVH.v()
linear = Model(f_RHS, extra_args=(y,))
mydata = Data(x, y)
myodr = ODR(mydata, linear, beta0=np.array([0.1] * 17),)
myoutput = myodr.run()
E = myoutput.beta
print(E)
#plt.plot(TT.vec, y,'b.',TT.vec, f_RHS(E, x, y),'r.')
# plt.show()
# plt.plot()
plt.plot(y.T, f_RHS(E, x, y))
plt.show()
| mit |
sniemi/SamPy | sandbox/src1/examples/font_indexing.py | 4 | 1299 | """
A little example that shows how the various indexing into the font
tables relate to one another. Mainly for mpl developers....
"""
import matplotlib
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, KERNING_UNFITTED, KERNING_UNSCALED
#fname = '/usr/share/fonts/sfd/FreeSans.ttf'
fname = matplotlib.get_data_path() + '/fonts/ttf/Vera.ttf'
font = FT2Font(fname)
font.set_charmap(0)
codes = font.get_charmap().items()
#dsu = [(ccode, glyphind) for ccode, glyphind in codes]
#dsu.sort()
#for ccode, glyphind in dsu:
# try: name = font.get_glyph_name(glyphind)
# except RuntimeError: pass
# else: print '% 4d % 4d %s %s'%(glyphind, ccode, hex(int(ccode)), name)
# make a charname to charcode and glyphind dictionary
coded = {}
glyphd = {}
for ccode, glyphind in codes:
name = font.get_glyph_name(glyphind)
coded[name] = ccode
glyphd[name] = glyphind
code = coded['A']
glyph = font.load_char(code)
#print glyph.bbox
print glyphd['A'], glyphd['V'], coded['A'], coded['V']
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_DEFAULT)
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNFITTED)
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNSCALED)
print 'AV', font.get_kerning(glyphd['A'], glyphd['T'], KERNING_UNSCALED)
| bsd-2-clause |
edhuckle/statsmodels | statsmodels/examples/example_enhanced_boxplots.py | 33 | 3179 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Necessary to make horizontal axis labels fit
plt.rcParams['figure.subplot.bottom'] = 0.23
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
# Group age by party ID.
age = [data.exog['age'][data.endog == id] for id in party_ID]
# Create a violin plot.
fig = plt.figure()
ax = fig.add_subplot(111)
sm.graphics.violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a bean plot.
fig2 = plt.figure()
ax = fig2.add_subplot(111)
sm.graphics.beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create a jitter plot.
fig3 = plt.figure()
ax = fig3.add_subplot(111)
plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small',
'label_rotation':30, 'violin_fc':(0.8, 0.8, 0.8),
'jitter_marker':'.', 'jitter_marker_size':3, 'bean_color':'#FF6F00',
'bean_mean_color':'#009D91'}
sm.graphics.beanplot(age, ax=ax, labels=labels, jitter=True,
plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Create an asymmetrical jitter plot.
ix = data.exog['income'] < 16 # incomes < $30k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_lower_income = [age[endog == id] for id in party_ID]
ix = data.exog['income'] >= 20 # incomes > $50k
age = data.exog['age'][ix]
endog = data.endog[ix]
age_higher_income = [age[endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts['violin_fc'] = (0.5, 0.5, 0.5)
plot_opts['bean_show_mean'] = False
plot_opts['bean_show_median'] = False
plot_opts['bean_legend_text'] = 'Income < \$30k'
plot_opts['cutoff_val'] = 10
sm.graphics.beanplot(age_lower_income, ax=ax, labels=labels, side='left',
jitter=True, plot_opts=plot_opts)
plot_opts['violin_fc'] = (0.7, 0.7, 0.7)
plot_opts['bean_color'] = '#009D91'
plot_opts['bean_legend_text'] = 'Income > \$50k'
sm.graphics.beanplot(age_higher_income, ax=ax, labels=labels, side='right',
jitter=True, plot_opts=plot_opts)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
ax.set_title("US national election '96 - Age & Party Identification")
# Show all plots.
plt.show()
| bsd-3-clause |
Tahsin-Mayeesha/Udacity-Machine-Learning-Nanodegree | projects/titanic_survival_exploration/titanic_visualizations.py | 24 | 5425 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
"""
Remove elements that do not match the condition provided.
    Takes a pandas DataFrame as input and returns a filtered DataFrame.
    The condition should be a single string of the following format:
      '<field> <op> <value>'
    where the following operations are valid: >, <, >=, <=, ==, !=
    Example: "Sex == 'male'" or 'Age < 18'
"""
field, op, value = condition.split(" ")
# convert value into number or strip excess quotes if string
try:
value = float(value)
except:
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter data and outcomes
data = data[matches].reset_index(drop = True)
return data
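# Illustrative example (added comment): given the merged DataFrame built in
# survival_stats below, a filters list like ["Sex == 'male'", 'Age < 18'] is
# applied one condition at a time, e.g.
#     filtered = filter_data(all_data, "Sex == 'male'")
#     filtered = filter_data(filtered, 'Age < 18')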
def survival_stats(data, outcomes, key, filters = []):
"""
Print out selected statistics regarding survival, given a feature of
interest and any number of filters (including no filters)
"""
# Check that the key exists
if key not in data.columns.values :
print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
return False
# Return the function before visualizing if 'Cabin' or 'Ticket'
# is selected: too many unique categories to display
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print "'{}' has too many unique categories to display! Try a different feature.".format(key)
return False
# Merge data and outcomes into single dataframe
all_data = pd.concat([data, outcomes], axis = 1)
# Apply filters to data
for condition in filters:
all_data = filter_data(all_data, condition)
# Create outcomes DataFrame
all_data = all_data[[key, 'Survived']]
# Create plotting figure
plt.figure(figsize=(8,6))
# 'Numerical' features
if(key == 'Age' or key == 'Fare'):
# Remove NaN values from Age data
all_data = all_data[~np.isnan(all_data[key])]
# Divide the range of data into bins and count survival rates
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
# 'Fares' has larger range of values than 'Age' so create more bins
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
# Overlay each bin's survival rates
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
# Add legend to plot
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
# 'Categorical' features
else:
# Set the various categories
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key]) + 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
# Create DataFrame containing categories and count of each
frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]
# Set the width of each bar
bar_width = 0.4
# Display each category's survival rates
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)), values)
plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
# Common attributes for plot formatting
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
plt.show()
# Report number of passengers with missing values
if sum(pd.isnull(all_data[key])):
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
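# Illustrative call (added comment): assuming `data` and `outcomes` were split
# from the Titanic CSV as in the accompanying project notebook, survival by sex
# for children only could be plotted with
#     survival_stats(data, outcomes, 'Sex', ["Age < 18"])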
| mit |
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/varLib/plot.py | 5 | 4153 | """Visualize DesignSpaceDocument and resulting VariationModel."""
from fontTools.varLib.models import VariationModel, supportScalar
from fontTools.designspaceLib import DesignSpaceDocument
from matplotlib import pyplot
from mpl_toolkits.mplot3d import axes3d
from itertools import cycle
import math
import logging
import sys
log = logging.getLogger(__name__)
def stops(support, count=10):
a,b,c = support
return [a + (b - a) * i / count for i in range(count)] + \
[b + (c - b) * i / count for i in range(count)] + \
[c]
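# Added note: with Python 3 division, e.g.
#     stops((-1., 0., 1.), count=2) == [-1.0, -0.5, 0.0, 0.5, 1.0]
# i.e. `count` samples on each side of the support peak plus the final endpoint.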
def _plotLocationsDots(locations, axes, subplot, **kwargs):
for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
if len(axes) == 1:
subplot.plot(
[loc.get(axes[0], 0)],
[1.],
'o',
color=color,
**kwargs
)
elif len(axes) == 2:
subplot.plot(
[loc.get(axes[0], 0)],
[loc.get(axes[1], 0)],
[1.],
'o',
color=color,
**kwargs
)
else:
raise AssertionError(len(axes))
def plotLocations(locations, fig, names=None, **kwargs):
n = len(locations)
cols = math.ceil(n**.5)
rows = math.ceil(n / cols)
if names is None:
names = [None] * len(locations)
model = VariationModel(locations)
names = [names[model.reverseMapping[i]] for i in range(len(names))]
axes = sorted(locations[0].keys())
if len(axes) == 1:
_plotLocations2D(
model, axes[0], fig, cols, rows, names=names, **kwargs
)
elif len(axes) == 2:
_plotLocations3D(
model, axes, fig, cols, rows, names=names, **kwargs
)
else:
raise ValueError("Only 1 or 2 axes are supported")
def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
subplot = fig.add_subplot(111)
for i, (support, color, name) in enumerate(
zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
):
if name is not None:
subplot.set_title(name)
subplot.set_xlabel(axis)
pyplot.xlim(-1.,+1.)
Xs = support.get(axis, (-1.,0.,+1.))
X, Y = [], []
for x in stops(Xs):
y = supportScalar({axis:x}, support)
X.append(x)
Y.append(y)
subplot.plot(X, Y, color=color, **kwargs)
_plotLocationsDots(model.locations, [axis], subplot)
def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
ax1, ax2 = axes
axis3D = fig.add_subplot(111, projection='3d')
for i, (support, color, name) in enumerate(
zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
):
if name is not None:
axis3D.set_title(name)
axis3D.set_xlabel(ax1)
axis3D.set_ylabel(ax2)
pyplot.xlim(-1.,+1.)
pyplot.ylim(-1.,+1.)
Xs = support.get(ax1, (-1.,0.,+1.))
Ys = support.get(ax2, (-1.,0.,+1.))
for x in stops(Xs):
X, Y, Z = [], [], []
for y in Ys:
z = supportScalar({ax1:x, ax2:y}, support)
X.append(x)
Y.append(y)
Z.append(z)
axis3D.plot(X, Y, Z, color=color, **kwargs)
for y in stops(Ys):
X, Y, Z = [], [], []
for x in Xs:
z = supportScalar({ax1:x, ax2:y}, support)
X.append(x)
Y.append(y)
Z.append(z)
axis3D.plot(X, Y, Z, color=color, **kwargs)
_plotLocationsDots(model.locations, [ax1, ax2], axis3D)
def plotDocument(doc, fig, **kwargs):
doc.normalize()
locations = [s.location for s in doc.sources]
names = [s.name for s in doc.sources]
plotLocations(locations, fig, names, **kwargs)
def main(args=None):
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
# configure the library logger (for >= WARNING)
configLogger()
# comment this out to enable debug messages from logger
# log.setLevel(logging.DEBUG)
if len(args) < 1:
print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
print(" or")
print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
sys.exit(1)
fig = pyplot.figure()
fig.set_tight_layout(True)
if len(args) == 1 and args[0].endswith('.designspace'):
doc = DesignSpaceDocument()
doc.read(args[0])
plotDocument(doc, fig)
else:
axes = [chr(c) for c in range(ord('A'), ord('Z')+1)]
locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args]
plotLocations(locs, fig)
pyplot.show()
if __name__ == '__main__':
import sys
sys.exit(main())
| apache-2.0 |
cauchycui/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
xubenben/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection onto the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection onto the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/mpl_toolkits/axes_grid1/anchored_artists.py | 10 | 12803 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import docstring
from matplotlib.offsetbox import (AnchoredOffsetbox, AnchoredText,
AnnotationBbox, AuxTransformBox, DrawingArea,
TextArea, VPacker)
from matplotlib.patches import Rectangle, Ellipse
class AnchoredDrawingArea(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, width, height, xdescent, ydescent,
loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
**kwargs):
"""
An anchored container with a fixed size and fillable DrawingArea.
Artists added to the *drawing_area* will have their coordinates
interpreted as pixels. Any transformations set on the artists will be
overridden.
Parameters
----------
width, height : int or float
width and height of the container, in pixels.
xdescent, ydescent : int or float
descent of the container in the x- and y- direction, in pixels.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
        **kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.DrawingArea`
A container for artists to display.
Examples
--------
To display blue and red circles of different sizes in the upper right
of an axes *ax*:
>>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
>>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
>>> ada.drawing_area.add_artist(Circle((30, 10), 5, fc="r"))
>>> ax.add_artist(ada)
"""
self.da = DrawingArea(width, height, xdescent, ydescent)
self.drawing_area = self.da
super(AnchoredDrawingArea, self).__init__(
loc, pad=pad, borderpad=borderpad, child=self.da, prop=None,
frameon=frameon, **kwargs
)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
        **kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
>>> el = Ellipse((0,0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self.drawing_area,
prop=prop,
frameon=frameon,
**kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, width, height, angle, loc,
pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
"""
Draw an anchored ellipse of a given size.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
width, height : int or float
Width and height of the ellipse, given in coordinates of
*transform*.
angle : int or float
Rotation of the ellipse, in degrees, anti-clockwise.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the ellipse, in fraction of the font size. Defaults
to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size. Defaults to 0.1.
frameon : bool, optional
If True, draw a box around the ellipse. Defaults to True.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
ellipse : `matplotlib.patches.Ellipse`
Ellipse patch drawn.
"""
self._box = AuxTransformBox(transform)
self.ellipse = Ellipse((0, 0), width, height, angle)
self._box.add_artist(self.ellipse)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=prop,
frameon=frameon, **kwargs)
class AnchoredSizeBar(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, size, label, loc,
pad=0.1, borderpad=0.1, sep=2,
frameon=True, size_vertical=0, color='black',
label_top=False, fontproperties=None,
**kwargs):
"""
Draw a horizontal scale bar with a center-aligned label underneath.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
size : int or float
Horizontal length of the size bar, given in coordinates of
*transform*.
label : str
Label to display.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the label and size bar, in fraction of the font
size. Defaults to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.1.
sep : int or float, optional
            Separation between the label and the size bar, in points.
Defaults to 2.
frameon : bool, optional
If True, draw a box around the horizontal bar and label.
Defaults to True.
size_vertical : int or float, optional
Vertical length of the size bar, given in coordinates of
*transform*. Defaults to 0.
color : str, optional
Color for the size bar and label.
Defaults to black.
label_top : bool, optional
If True, the label will be over the size bar.
Defaults to False.
fontproperties : `matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
**kwargs :
            Keyword arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
size_bar : `matplotlib.offsetbox.AuxTransformBox`
Container for the size bar.
txt_label : `matplotlib.offsetbox.TextArea`
Container for the label of the size bar.
Notes
-----
        If *prop* is passed as a keyword argument, but *fontproperties* is
        not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredSizeBar
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.random.random((10,10)))
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
>>> ax.add_artist(bar)
>>> fig.show()
Using all the optional parameters
>>> import matplotlib.font_manager as fm
>>> fontprops = fm.FontProperties(size=14, family='monospace')
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 units', 4, pad=0.5, \
sep=5, borderpad=0.5, frameon=False, \
size_vertical=0.5, color='white', \
fontproperties=fontprops)
"""
self.size_bar = AuxTransformBox(transform)
self.size_bar.add_artist(Rectangle((0, 0), size, size_vertical,
fill=False, facecolor=color,
edgecolor=color))
if fontproperties is None and 'prop' in kwargs:
fontproperties = kwargs.pop('prop')
if fontproperties is None:
textprops = {'color': color}
else:
textprops = {'color': color, 'fontproperties': fontproperties}
self.txt_label = TextArea(
label,
minimumdescent=False,
textprops=textprops)
if label_top:
_box_children = [self.txt_label, self.size_bar]
else:
_box_children = [self.size_bar, self.txt_label]
self._box = VPacker(children=_box_children,
align="center",
pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=fontproperties,
frameon=frameon, **kwargs)
| bsd-3-clause |
mganeva/mantid | qt/applications/workbench/workbench/widgets/plotselector/presenter.py | 1 | 15293 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import absolute_import, print_function
import os
import re
from .model import PlotSelectorModel
from .view import PlotSelectorView, Column
class PlotSelectorPresenter(object):
"""
Presenter for the plot selector widget. This class can be
responsible for the creation of the model and view, passing in
the GlobalFigureManager as an argument, or the presenter and view
can be passed as arguments (only intended for testing).
"""
def __init__(self, global_figure_manager, view=None, model=None):
"""
Initialise the presenter, creating the view and model, and
setting the initial plot list
:param global_figure_manager: The GlobalFigureManager class
:param view: Optional - a view to use instead of letting the
class create one (intended for testing)
:param model: Optional - a model to use instead of letting
the class create one (intended for testing)
"""
# Create model and view, or accept mocked versions
if view is None:
self.view = PlotSelectorView(self)
else:
self.view = view
if model is None:
self.model = PlotSelectorModel(self, global_figure_manager)
else:
self.model = model
# Make sure the plot list is up to date
self.update_plot_list()
def get_plot_name_from_number(self, plot_number):
return self.model.get_plot_name_from_number(plot_number)
# ------------------------ Plot Updates ------------------------
def update_plot_list(self):
"""
Updates the plot list in the model and the view. Filter text
is applied to the updated selection if required.
"""
plot_list = self.model.get_plot_list()
self.view.set_plot_list(plot_list)
def append_to_plot_list(self, plot_number):
"""
Appends the plot name to the end of the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.append_to_plot_list(plot_number)
self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number))
def remove_from_plot_list(self, plot_number):
"""
Removes the plot name from the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.remove_from_plot_list(plot_number)
def rename_in_plot_list(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new name for the plot
"""
self.view.rename_in_plot_list(plot_number, new_name)
# ----------------------- Plot Filtering ------------------------
def filter_text_changed(self):
"""
Called by the view when the filter text is changed (e.g. by
typing or clearing the text)
"""
if self.view.get_filter_text():
self.view.filter_plot_list()
else:
self.view.unhide_all_plots()
def is_shown_by_filter(self, plot_number):
"""
:param plot_number: The unique number in GlobalFigureManager
:return: True if shown, or False if filtered out
"""
filter_text = self.view.get_filter_text()
plot_name = self.get_plot_name_from_number(plot_number)
return filter_text.lower() in plot_name.lower()
# ------------------------ Plot Showing ------------------------
def show_single_selected(self):
"""
When a list item is double clicked the view calls this method
to bring the selected plot to the front
"""
plot_number = self.view.get_currently_selected_plot_number()
self._make_plot_active(plot_number)
def show_multiple_selected(self):
"""
Shows multiple selected plots, e.g. from pressing the 'Show'
button with multiple selected plots
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._make_plot_active(plot_number)
def _make_plot_active(self, plot_number):
"""
        Make the plot with the given number active - bring it to the
front and make it the choice for overplotting
:param plot_number: The unique number in GlobalFigureManager
"""
try:
self.model.show_plot(plot_number)
except ValueError as e:
print(e)
def set_active_font(self, plot_number):
"""
Set the icon for the active plot to be colored
:param plot_number: The unique number in GlobalFigureManager
"""
active_plot_number = self.view.active_plot_number
if active_plot_number > 0:
try:
self.view.set_active_font(active_plot_number, False)
except TypeError:
pass
# The last active plot could have been closed
# already, so there is nothing to do
self.view.set_active_font(plot_number, True)
self.view.active_plot_number = plot_number
# ------------------------ Plot Hiding -------------------------
def hide_selected_plots(self):
"""
Hide all plots that are selected in the view
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._hide_plot(plot_number)
def _hide_plot(self, plot_number):
"""
Hides a single plot
"""
try:
self.model.hide_plot(plot_number)
except ValueError as e:
print(e)
def toggle_plot_visibility(self, plot_number):
"""
Toggles a plot between hidden and shown
:param plot_number: The unique number in GlobalFigureManager
"""
if self.model.is_visible(plot_number):
self._hide_plot(plot_number)
else:
self._make_plot_active(plot_number)
self.update_visibility_icon(plot_number)
def update_visibility_icon(self, plot_number):
"""
Updates the icon to indicate a plot as hidden or visible
:param plot_number: The unique number in GlobalFigureManager
"""
try:
is_visible = self.model.is_visible(plot_number)
self.view.set_visibility_icon(plot_number, is_visible)
except ValueError:
# There is a chance the plot was closed, which calls an
# update to this method. If we can not get the visibility
# status it is safe to assume the plot has been closed.
pass
# ------------------------ Plot Renaming ------------------------
def rename_figure(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new plot name
"""
try:
self.model.rename_figure(plot_number, new_name)
except ValueError as e:
# We need to undo the rename in the view
self.view.rename_in_plot_list(plot_number, new_name)
print(e)
# ------------------------ Plot Closing -------------------------
def close_action_called(self):
"""
This is called by the view when closing plots is requested
(e.g. pressing close or delete).
"""
selected_plots = self.view.get_all_selected_plot_numbers()
self._close_plots(selected_plots)
def close_single_plot(self, plot_number):
"""
This is used to close plots when a close action is called
that does not refer to the selected plot(s)
:param plot_number: The unique number in GlobalFigureManager
"""
self._close_plots([plot_number])
def _close_plots(self, list_of_plot_numbers):
"""
        Accepts a list of plot numbers to close
        :param list_of_plot_numbers: A list of unique plot numbers in
            GlobalFigureManager
"""
for plot_number in list_of_plot_numbers:
try:
self.model.close_plot(plot_number)
except ValueError as e:
print(e)
# ----------------------- Plot Sorting --------------------------
def set_sort_order(self, is_ascending):
"""
Sets the sort order in the view
:param is_ascending: If true ascending order, else descending
"""
self.view.set_sort_order(is_ascending)
def set_sort_type(self, sort_type):
"""
Sets the sort order in the view
:param sort_type: A Column enum with the column to sort on
"""
self.view.set_sort_type(sort_type)
self.update_last_active_order()
def update_last_active_order(self):
"""
        Update the sort keys in the view. This is only required when
        changes to the last shown order occur in the model; when
        renaming, the key is already set
"""
if self.view.sort_type() == Column.LastActive:
self._set_last_active_order()
def _set_last_active_order(self):
"""
Set the last shown order in the view. This checks the sorting
currently set and then sets the sort keys to the appropriate
values
"""
last_active_values = self.model.last_active_values()
self.view.set_last_active_values(last_active_values)
def get_initial_last_active_value(self, plot_number):
"""
        Gets the initial last active value for a plot just added; in
        this case it is assumed not to have been shown
:param plot_number: The unique number in GlobalFigureManager
:return: A string with the last active value
"""
return '_' + self.model.get_plot_name_from_number(plot_number)
def get_renamed_last_active_value(self, plot_number, old_last_active_value):
"""
        Gets the last active value for a plot that was
        renamed. If the plot had a numeric value, i.e. has been shown,
        this is retained; otherwise the initial value is set
:param plot_number: The unique number in GlobalFigureManager
:param old_last_active_value: The previous last active value
"""
if old_last_active_value.isdigit():
return old_last_active_value
else:
return self.get_initial_last_active_value(plot_number)
# ---------------------- Plot Exporting -------------------------
def export_plots_called(self, extension):
"""
        Called from the view to export plots; a single plot or multiple
        plots are exported depending on the number currently selected
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
plot_numbers = self.view.get_all_selected_plot_numbers()
if len(plot_numbers) == 1:
self._export_single_plot(plot_numbers[0], extension)
elif len(plot_numbers) > 1:
self._export_multiple_plots(plot_numbers, extension)
def _export_single_plot(self, plot_number, extension):
"""
Called when a single plot is selected to export - prompts for
a filename then tries to save the plot
:param plot_number: The unique number in GlobalFigureManager
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
absolute_path = self.view.get_file_name_for_saving(extension)
        if not absolute_path.endswith(extension):
absolute_path += extension
try:
self.model.export_plot(plot_number, absolute_path)
except ValueError as e:
print(e)
def _export_multiple_plots(self, plot_numbers, extension):
"""
Export all selected plots in the plot_numbers list, first
prompting for a save directory then sanitising plot names to
unique, usable file names
:param plot_numbers: A list of plot numbers to export
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
dir_name = self.view.get_directory_name_for_saving()
# A temporary dictionary holding plot numbers as keys, plot
# names as values
plots = {}
for plot_number in plot_numbers:
plot_name = self.model.get_plot_name_from_number(plot_number)
plot_name = self._replace_special_characters(plot_name)
if plot_name in plots.values():
plot_name = self._make_unique_name(plot_name, plots)
plots[plot_number] = plot_name
self._export_plot(plot_number, plot_name, dir_name, extension)
def _replace_special_characters(self, string):
"""
Removes any characters that are not valid in file names
        across all operating systems ('/' on Linux/Mac, with several
        more reserved on Windows)
:param string: The string to replace characters in
:return: The string with special characters replace by '-'
"""
return re.sub(r'[<>:"/|\\?*]', r'-', string)
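    # Added example comment: _replace_special_characters('Figure 1: a/b')
    # returns 'Figure 1- a-b' -- each reserved character is replaced by '-'.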
def _make_unique_name(self, name, dictionary):
"""
Given a name and a dictionary, make a unique name that does
not already exist in the dictionary values by appending
' (1)', ' (2)', ' (3)' etc. to the end of the name
:param name: A string with the non-unique name
:param dictionary: A dictionary with string values
:return : The unique plot name
"""
i = 1
while True:
plot_name_attempt = name + ' ({})'.format(str(i))
if plot_name_attempt not in dictionary.values():
break
i += 1
return plot_name_attempt
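    # Added example comment: if `dictionary` already contains the values
    # 'Figure 1' and 'Figure 1 (1)', then
    # _make_unique_name('Figure 1', dictionary) returns 'Figure 1 (2)'.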
def _export_plot(self, plot_number, plot_name, dir_name, extension):
"""
Given a plot number, plot name, directory and extension
construct the absolute path name and call the model to save
the figure
:param plot_number: The unique number in GlobalFigureManager
:param plot_name: The name to use for saving
:param dir_name: The directory to save to
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
if dir_name:
filename = os.path.join(dir_name, plot_name + extension)
try:
self.model.export_plot(plot_number, filename)
except ValueError as e:
print(e)
| gpl-3.0 |
wangkua1/sportvu | sportvu/detection_from_raw_pred.py | 1 | 3391 | """detection_from_raw_pred.py
* not super useful, a simple script that plots a) raw pred, b) gt pnr, c) detector output
at 1 single setting
Usage:
detection_from_raw_pred.py <fold_index> <f_data_config> <f_model_config> <f_detect_config> --train
Arguments:
Example:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import os
from tqdm import tqdm
from docopt import docopt
import yaml
import gc
import matplotlib.pylab as plt
import cPickle as pkl
##
from sportvu.data.dataset import BaseDataset
from sportvu.detect.running_window_p import RunWindowP
from sportvu.detect.nms import NMS
from sportvu.detect.utils import smooth_1D_array
arguments = docopt(__doc__)
print ("...Docopt... ")
print(arguments)
print ("............\n")
f_data_config = arguments['<f_data_config>']
f_model_config = arguments['<f_model_config>']
f_detect_config = arguments['<f_detect_config>']
if arguments['--train']:
dataset = BaseDataset(f_data_config, fold_index=int(arguments['<fold_index>']), load_raw=True)
# pre_trained = arguments['<pre_trained>']
data_config = yaml.load(open(f_data_config, 'rb'))
model_config = yaml.load(open(f_model_config, 'rb'))
model_name = os.path.basename(f_model_config).split('.')[0]
data_name = os.path.basename(f_data_config).split('.')[0]
exp_name = '%s-X-%s' % (model_name, data_name)
detect_config = yaml.load(open(f_detect_config, 'rb'))
detector = eval(detect_config['class'])(detect_config)
plot_folder = os.path.join('./plots', exp_name)
if not os.path.exists(plot_folder):
raise Exception('Run test.py first to get raw predictions')
def label_in_cand(cand, labels):
for l in labels:
if l > cand[1] and l < cand[0]:
return True
return False
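# Added note: `cand` is an interval stored as (upper, lower) gameclock bounds
# (see `np.arange(cand[1], cand[0], .1)` below), so this returns True when any
# ground-truth label falls strictly inside the candidate window.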
plt.figure()
if arguments['--train']:
split = 'train'
else:
split = 'val'
all_pred_f = filter(lambda s:'.pkl' in s and split in s
and 'meta' not in s,os.listdir(os.path.join(plot_folder,'pkl')))
if arguments['--train']:
annotations = []
for _, f in tqdm(enumerate(all_pred_f)):
ind = int(f.split('.')[0].split('-')[1])
gameclocks, pnr_probs, labels = pkl.load(open(os.path.join(plot_folder,'pkl/%s-%i.pkl'%(split,ind)), 'rb'))
meta = pkl.load( open(
os.path.join(plot_folder, 'pkl/%s-meta-%i.pkl' %(split, ind)), 'rb'))
cands, mp, frame_indices = detector.detect(pnr_probs, gameclocks, True)
print (cands)
plt.plot(gameclocks, pnr_probs, '-')
if mp is not None:
plt.plot(gameclocks, mp, '-')
plt.plot(np.array(labels), np.ones((len(labels))), '.')
for ind, cand in enumerate(cands):
cand_x = np.arange(cand[1], cand[0], .1)
plt.plot(cand_x, np.ones((len(cand_x))) * .95, '-' )
## if FP, record annotations
if arguments['--train'] and not label_in_cand(cand, labels):
anno = {'gameid':meta[1], 'gameclock':gameclocks[frame_indices[ind]],
'eid':meta[0], 'quarter':dataset.games[meta[1]]['events'][meta[0]]['quarter']}
annotations.append(anno)
plt.ylim([0,1])
plt.title('Game: %s, Event: %i'%(meta[1], meta[0]))
plt.savefig(os.path.join(plot_folder, '%s-%s-%i.png' %(detect_config['class'], split, ind)))
plt.clf()
pkl.dump(annotations, open(os.path.join(plot_folder,'pkl/hard-negative-examples.pkl'), 'wb')) | mit |
nolanliou/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
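# Added example comment: for feature = np.array([[0., 0.], [3., 4.]]) this
# returns [[0., 5.], [5., 0.]], and [[0., 25.], [25., 0.]] with squared=True.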
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
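# Hypothetical illustration of the helper above (not executed by the tests):
# for a multilabel row such as [0, 1, 1] it yields a SparseTensor with
# indices [[1], [2]], values [1., 1.] and dense_shape [3]; one SparseTensor
# is produced per row of the input matrix.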
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
      # Compute vanilla npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
      # Set the last column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
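# Minimal usage sketch for the reference implementation above (illustrative;
# requires scikit-learn, mirroring how ClusterLossTest calls it further below):
#
#   feat = np.random.rand(16, 4).astype(np.float32)
#   y = np.random.randint(0, 3, size=16)
#   loss = compute_cluster_loss_numpy(feat, y, margin_multiplier=1.0,
#                                     enable_pam_finetuning=False)
#
# The returned value is max(0, score_augmented - score_ground_truth).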
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
      raise ValueError('n_clusters has to be a positive integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
        # Remove this cluster's current medoid.
        medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
        # Add this new candidate to the medoid list.
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
      # update the medoid for each cluster
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
      raise ValueError(
          'The number of medoids ({}) must not be larger than the number '
          'of samples ({}).'.format(self.n_clusters, feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
    # Assign each data point to the cluster whose medoid yields
    # the smallest distance.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
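# Standalone use of the greedy facility-location helper above (hypothetical;
# the tests below only exercise it through compute_cluster_loss_numpy):
#
#   facility = ForwardGreedyFacility(n_clusters=3).loss_augmented_fit(
#       feat, y, loss_mult=1.0)
#   facility.labels_       # cluster assignment for every point
#   facility.center_ics_   # indices of the chosen medoids
#   facility.score_aug_    # loss-augmented facility-location score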
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/ipykernel/kernelapp.py | 5 | 19344 | """An Application for launching a kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
import os
import sys
import signal
import traceback
import logging
from tornado import ioloop
import zmq
from zmq.eventloop import ioloop as zmq_ioloop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from ipython_genutils.path import filefind, ensure_dir_exists
from traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
)
from ipython_genutils.importstring import import_item
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_client import write_connection_file
from jupyter_client.connect import ConnectionFileMixin
# local imports
from .iostream import IOPubThread
from .heartbeat import Heartbeat
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from jupyter_client.session import (
Session, session_flags, session_aliases,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'transport': 'IPKernelApp.transport',
})
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
ConnectionFileMixin):
name='ipython-kernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = Type('ipykernel.ipkernel.IPythonKernel',
klass='ipykernel.kernelbase.Kernel',
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""").tag(config=True)
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat, allow_none=True)
ports = Dict()
subcommands = {
'install': (
'ipykernel.kernelspec.InstallIPythonKernelSpecApp',
'Install the IPython kernel'
),
}
# connection info:
connection_dir = Unicode()
@default('connection_dir')
def _default_connection_dir(self):
return jupyter_runtime_dir()
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.connection_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
outstream_class = DottedObjectName('ipykernel.iostream.OutStream',
help="The importstring for the OutStream factory").tag(config=True)
displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook',
help="The importstring for the DisplayHook factory").tag(config=True)
# polling
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""").tag(config=True)
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""").tag(config=True)
def init_crash_handler(self):
sys.excepthook = self.excepthook
def excepthook(self, etype, evalue, tb):
# write uncaught traceback to 'real' stderr, not zmq-forwarder
traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
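    # Sketch of the binding behaviour above (illustrative comment only): with
    # the default 'tcp' transport and port <= 0, a random free port is chosen
    # by zmq's bind_to_random_port and returned; with the 'ipc' transport the
    # "port" is just a counter used to build a unique socket path such as
    # "<ip>-1", "<ip>-2", ... until an unused path is found.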
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, and I'll create it in this directory:
ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
# Also, I will clean it up:
atexit.register(self.cleanup_connection_file)
return
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_socket.linger = 1000
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_socket.linger = 1000
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_socket.linger = 1000
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
self.init_iopub(context)
def init_iopub(self, context):
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_socket.linger = 1000
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.configure_tornado_logger()
self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
self.iopub_thread.start()
# backward-compat: wrap iopub socket API in background thread
self.iopub_socket = self.iopub_thread.background_socket
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.connection_dir:
# use shortname
tail = basename
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
# unless log-level is CRITICAL (--quiet)
if not self.parent_handle and self.log_level < logging.CRITICAL:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
self.displayhook = displayhook_factory(self.session, self.iopub_socket)
sys.displayhook = self.displayhook
self.patch_io()
def patch_io(self):
"""Patch important libraries that can't handle sys.stdout forwarding"""
try:
import faulthandler
except ImportError:
pass
else:
# Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
# updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
# https://docs.python.org/3/library/faulthandler.html#faulthandler.enable
# change default file to __stderr__ from forwarded stderr
faulthandler_enable = faulthandler.enable
def enable(file=sys.__stderr__, all_threads=True, **kwargs):
return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)
faulthandler.enable = enable
if hasattr(faulthandler, 'register'):
faulthandler_register = faulthandler.register
def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
return faulthandler_register(signum, file=file, all_threads=all_threads,
chain=chain, **kwargs)
faulthandler.register = register
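    # Net effect of the patch above (hypothetical before/after): a bare
    # faulthandler.enable() would otherwise try to use the ZMQ-forwarded
    # sys.stderr, which has no usable file descriptor; after patching, the
    # call behaves like faulthandler.enable(file=sys.__stderr__,
    # all_threads=True) while still honouring any explicit arguments.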
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = self.kernel_class.instance
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_thread=self.iopub_thread,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports({
name + '_port': port for name, port in self.ports.items()
})
self.kernel = kernel
# Allow the displayhook to get the execution count
self.displayhook.get_execution_count = lambda: kernel.execution_count
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Register inline backend as default
# this is higher priority than matplotlibrc,
# but lower priority than anything else (mpl.use() for instance).
# This only affects matplotlib >= 1.5
if not os.environ.get('MPLBACKEND'):
os.environ['MPLBACKEND'] = 'module://ipykernel.pylab.backend_inline'
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace error-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=sys.stderr)
print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = getattr(self.kernel, 'shell', None)
if self.shell:
self.shell.configurables.append(self)
def init_extensions(self):
super(IPKernelApp, self).init_extensions()
# BEGIN HARDCODED WIDGETS HACK
# Ensure ipywidgets extension is loaded if available
extension_man = self.shell.extension_manager
if 'ipywidgets' not in extension_man.loaded:
try:
extension_man.load_extension('ipywidgets')
except ImportError as e:
self.log.debug('ipywidgets package not installed. Widgets will not be available.')
# END HARDCODED WIDGETS HACK
def configure_tornado_logger(self):
""" Configure the tornado logging.Logger.
Must set up the tornado logger or else tornado will call
basicConfig for the root logger which makes the root logger
go to the real sys.stderr instead of the capture streams.
This function mimics the setup of logging.basicConfig.
"""
logger = logging.getLogger('tornado')
handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
handler.setFormatter(formatter)
logger.addHandler(handler)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
if self.subapp is not None:
return
# register zmq IOLoop with tornado
zmq_ioloop.install()
self.init_blackhole()
self.init_connection_file()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.write_connection_file()
# Log connection info after writing connection file, so that the connection
# file is definitely available at the time someone reads the log.
self.log_connection_info()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
if self.shell:
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
# initialization do not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.subapp is not None:
return self.subapp.start()
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
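# Typical ways to launch this application (illustrative):
#
#   ipython kernel                          # entry point named in _ctrl_c_message
#   python -m ipykernel -f kernel-test.json # assuming the package's __main__ wrapper
#
# and a frontend can then attach with, e.g.:
#
#   jupyter console --existing kernel-<pid>.json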
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| apache-2.0 |
jorik041/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
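# The generator tests in this module follow the nose "yield test" pattern:
# every yielded tuple (check, name, Estimator) is collected and run as an
# individual test case. A rough, non-yielding equivalent of the test above
# would be (illustrative only):
#
#   for name, Estimator in all_estimators(include_meta_estimators=True):
#       check_parameters_default_constructible(name, Estimator)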
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup; this tests all the
    # 'configure' functions in the setup.py files in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
            # CV folds and fits a model for each CV iteration before averaging
            # the coef. Therefore it is expected not to behave exactly like the
            # other linear models.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
                    # Multitask models related to ENet cannot handle
                    # mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
tortugueta/multilayers | examples/radcenter_distribution.py | 1 | 8087 | # -*- coding: utf-8 -*-
"""
Name : radcenter_distribution
Author : Joan Juvert <trust.no.one.51@gmail.com>
Version : 1.0
Description : This script calculates the influence of the distribution of
: radiative centers in the active layer on the observed
: spectrum.
Copyright 2012 Joan Juvert
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import multilayers as ml
import numpy as np
import bphysics as bp
import scipy.integrate as integ
import argparse as ap
import sys
import pdb
# Argument parsing
parser = ap.ArgumentParser(
description = "This script calculates the effect of the " + \
"distribution of radiative centers in the active layer on " + \
"the modificator to the spectrum. The observation angle is " + \
"a fixed parameter. Optionally, the output can be plotted " + \
"and output to the standard output or to a file. The matrix " + \
"containing the values of F(z, lambda) can be saved to a file " + \
"and recovered in a following run of the program to avoid " + \
"recalculating it in case we want to calculate the effect of " + \
"different distributions on the same system.")
parser.add_argument(
"--graph",
help = "Plot the results",
action = "store_true")
parser.add_argument(
"-o",
"--output",
help = "Dump the results to a file")
parser.add_argument(
"-s",
"--savematrix",
help = "Save the matrix with the F(z, lambda) values to a file")
parser.add_argument(
"-l",
"--loadmatrix",
help = "Load the matrix with the F(z, lambda) values from a file")
args = parser.parse_args()
# Load the depth distribution of radiative centers. Note that the origin
# and units of z must be the same as in the multilayer. The distribution
# should be normalized to 1.
print("Loading the distribution...")
path = "/home/joan/Dropbox/CNM/projectes/simulations_report/figures/" + \
"rcdistributions/"
distribution = bp.rdfile(path + "gaussian_m25_s07.dat", usecols = [0, 1])[1]
print("Done")
print("Checking the distribution...")
integral = integ.simps(distribution[:, 1], distribution[:, 0], 0)
np.testing.assert_almost_equal(integral, 1, 2)
print("Done")
# If we load the values of F(z, lambda) calculated in a previous
# execution we do not need to build the multilayer and repeat the
# calculation of the F function. Notice that the values of z at which
# the new distribution is sampled should be the same as the previous
# one.
if args.loadmatrix:
print("Loading matrix...")
fmatrix = np.load(args.loadmatrix)
zlist = fmatrix['zlist']
np.testing.assert_array_equal(zlist, distribution[:, 0])
wlist = fmatrix['wlist']
angle = fmatrix['angle']
fte = fmatrix['fte']
ftm = fmatrix['ftm']
print("Done")
else:
# Create the materials
print("Loading materials... ")
silicon = ml.Medium("silicon.dat")
air = ml.Medium("air.dat")
sio2 = ml.Medium("sio2.dat")
poly = ml.Medium("polysilicon.dat")
print("Done")
# Set the fixed parameters.
angle = np.deg2rad(0)
# Create the multilayer
print("Building multilayer and allocating memory... ")
thicknesses = [300, 50]
multilayer = ml.Multilayer([
air,
[poly, thicknesses[0]],
[sio2, thicknesses[1]],
silicon])
# Define the wavelengths and z coordinates at which F will be calculated
# and allocate memory for the results. We will use a structured array to
# store the values of F(z, lambda).
wstep = 1
wmin = multilayer.getMinMaxWlength()[0]
wmax = multilayer.getMinMaxWlength()[1]
wlist = np.arange(wmin, wmax, wstep)
zlist = distribution[:, 0]
ftype = np.dtype([
('fx', np.complex128),
('fy', np.complex128),
('fz', np.complex128)])
resmatrix = np.empty((zlist.size, wlist.size), dtype = ftype)
print("Done")
# I(wavelength, theta) = s(wavelength) * F'(wavelength, theta), where
    # F'(wav, theta) = integral[z](|F|^2 * rcdist(z)). Therefore, we
# calculate the new spectrum as a modification to the original spectrum.
# The modification factor F'(wav, theta) is an integral over z.
# First calculate |Fy|^2 for te and |Fx*cos^2 + Fz*sin^2|^2 for tm. We
# do fx and fz in one loop and fy in another independent loop to avoid
# recalculating the characteristic matrix at every iteration due to the
# change of polarization.
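    # In symbols (sketch): with rho(z) the normalized radiative-center
    # distribution loaded above,
    #   F'_TE(lambda) = integral[z]( |Fy(z, lambda)|^2 * rho(z) dz )
    #   F'_TM(lambda) = integral[z]( |Fx*cos(A)^2 + Fz*sin(A)^2|^2 * rho(z) dz )
    # which is exactly what the Simpson integration further below evaluates.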
print("Calculating F...")
for (widx, wlength) in enumerate(wlist):
percent = (float(widx) / wlist.size) * 100
print("%.2f%%" % percent)
for (zidx, z) in enumerate(zlist):
resmatrix[zidx][widx]['fx'] = multilayer.calculateFx(z, wlength, angle)
resmatrix[zidx][widx]['fz'] = multilayer.calculateFz(z, wlength, angle)
for (zidx, z) in enumerate(zlist):
resmatrix[zidx][widx]['fy'] = multilayer.calculateFy(z, wlength, angle)
    # We are probably more interested in the effect of the multilayer on the
# energy rather than the electric field. What we want is |Fy(z)|^2 for
# TE waves and |Fx(z) cosA^2 + Fz(z) sinA^2|^2 for TM waves.
ftm = np.absolute(
resmatrix['fx'] * np.cos(angle) ** 2 + \
resmatrix['fz'] * np.sin(angle) ** 2) ** 2
fte = np.absolute(resmatrix['fy']) ** 2
print("Done")
# Notice that until now we have not used the distribution of the
# radiative centers, but the calculation of ftm and fte is costly.
# If requested, we can save fte and ftm to a file. In a following
# execution of the script, the matrix can be loaded from the file
# instead of recalculated.
if args.savematrix:
print("Saving matrix...")
np.savez(args.savematrix, fte = fte, ftm = ftm, zlist = zlist,
wlist = wlist, angle = angle)
print("Done")
# Build or load the original spectrum. It should be sampled at the same
# wavelengths defined in wlist. If we are interested only in the
# modification factor of the spectrum, not in the modified spectrum, we can
# leave it at 1.
original_spec = 1
# Multiply each F(z, lambda) by the distribution.
print("Integrating...")
distval = distribution[:, 1].reshape(distribution[:, 1].size, 1)
fte_mplied = fte * distval
ftm_mplied = ftm * distval
fte_int = integ.simps(fte_mplied, zlist, axis = 0)
ftm_int = integ.simps(ftm_mplied, zlist, axis = 0)
spectrum_modte = original_spec * fte_int
spectrum_modtm = original_spec * ftm_int
print("Done")
# Dump data to file or stdout
comments = "# F_TE = |Fy^2|^2\n" + \
"# F_TM = |Fx * cosA^2 + Fz * sinA^2|^2\n" + \
"# Modified spectrum for TE and TM waves for a\n" + \
"# distributions of the radiative centers.\n" + \
"# wlength\tF_TE\tF_TM"
if args.output:
bp.wdfile(args.output, comments,
np.array([wlist, spectrum_modte, spectrum_modtm]).T, '%.6e')
else:
print(comments)
    for i in range(wlist.size):
print("%.6e\t%.6e\t%.6e" % (wlist[i], spectrum_modte[i],
spectrum_modtm[i]))
# Plot data if requested
if args.graph:
import matplotlib.pyplot as plt
plt.plot(wlist, spectrum_modte, label='TE', color = 'r')
plt.plot(wlist, spectrum_modtm, label='TM', color = 'b')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Energy ratio')
plt.grid()
plt.legend(loc=2)
plt.title('%.1f rad' % angle)
plt.show()
plt.close()
| gpl-3.0 |
dpshelio/sunpy | examples/units_and_coordinates/planet_locations.py | 1 | 1252 | """
===================================
Getting the location of the planets
===================================
How to get the position of planetary bodies in the solar system using
`astropy's solar system ephemeris <http://docs.astropy.org/en/stable/coordinates/solarsystem.html#solar-system-ephemerides>`__ information and SunPy.
"""
import matplotlib.pyplot as plt
from astropy.time import Time
from sunpy.coordinates import get_body_heliographic_stonyhurst
##############################################################################
# Let's grab the positions of each of the planets in Heliographic Stonyhurst
# coordinates.
obstime = Time('2014-05-15T07:54:00.005')
planet_list = ['earth', 'venus', 'mars', 'mercury', 'jupiter', 'neptune', 'uranus', 'sun']
planet_coord = [get_body_heliographic_stonyhurst(this_planet, time=obstime) for this_planet in planet_list]
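##############################################################################
# As a quick check (illustrative addition, not part of the original example),
# each returned coordinate exposes lon, lat and radius, so the heliocentric
# distance of every body could be printed with:
#
# for this_planet, this_coord in zip(planet_list, planet_coord):
#     print(this_planet, this_coord.radius)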
##############################################################################
# Let's plot the results. Remember the Sun is at the center of this coordinate
# system.
ax = plt.subplot(projection='polar')
for this_planet, this_coord in zip(planet_list, planet_coord):
plt.polar(this_coord.lon.to('rad'), this_coord.radius, 'o', label=this_planet)
plt.legend()
plt.show()
| bsd-2-clause |
armgilles/open-moulinette | caf/scripts/PajeCom.py | 2 | 2407 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 13:47:48 2015
@author: GILLES Armand
"""
import pandas as pd
import glob
df = pd.read_csv('source/PajeCom2009.csv', sep=";")
df.columns = ['Communes', 'Codes_Insee', 'NB_Allocataires_2009',
'ALL_PAJE_2009', 'ALL_PRIM_2009', 'ALL_BASEP_2009',
'ALL_ASMA_2009','ALL_Clca_Colca_2009']
files = glob.glob('source/PajeCom*')
for path_file in files:
year = str(path_file[-8:-4])
if (year != '2009'):
df_temp = pd.read_csv(path_file, sep=';')
# Rename Col with year
year_col = ['Communes', 'Codes_Insee']
features_col = []
for col in df_temp.columns[2:]:
year_col.append(col +"_"+ year)
features_col.append(col +"_"+ year)
        # Adding key for merging
features_col.append('Codes_Insee')
df_temp.columns = year_col
df = pd.merge(df, df_temp[features_col], how='inner', on='Codes_Insee')
# Rename columns to have unique names in the future merge
list_col = []
for col in df.columns:
if "nb_allocataires" in col.lower(): # NB_Allocataires (2009) != NB_allocataires (2010)
list_col.append(col+"_PC") # PC = PageCom
else:
list_col.append(col)
df.columns = list_col
df.to_csv('data/full_PageCom.csv', encoding='utf-8', index=False)
## Features
#u'NB_Allocataires_2009_PC',
# u'ALL_PAJE_2009', u'ALL_PRIM_2009', u'ALL_BASEP_2009', u'ALL_ASMA_2009',
# u'ALL_Clca_Colca_2009', u'NB_Allocataires_2010_PC', u'ALL_PAJE_2010',
# u'ALL_PRIM_2010', u'ALL_BASEP_2010', u'ALL_ASMA_2010',
# u'ALL_Clca_Colca_2010', u'NB_Allocataires_2011_PC', u'ALL_PAJE_2011',
# u'ALL_PRIM_2011', u'ALL_BASEP_2011', u'ALL_ASMA_2011',
# u'ALL_Clca_Colca_2011', u'NB_Allocataires_2012_PC', u'ALL_PAJE_2012',
# u'ALL_PRIM_2012', u'ALL_BASEP_2012', u'ALL_ASMA_2012',
# u'ALL_Clca_Colca_2012', u'NB_Allocataires_2013_PC', u'ALL_PAJE_2013',
# u'ALL_PRIM_2013', u'ALL_BASEP_2013', u'ALL_ASMA_2013',
# u'ALL_Clca_Colca_2013', u'NB_Allocataires_2014_PC', u'ALL_PAJE_2014',
# u'ALL_PRIM_2014', u'ALL_BASEP_2014', u'ALL_CMG_2014',
# u'ALL_CMG_ASMA_2014', u'ALL_CMG_DOM_2014', u'ALL_CMG_A_2014',
# u'ALL_Clca_Colca_2014', u'NB_Allocataires_2015_PC', u'ALL_PAJE_2015',
# u'ALL_PRIM_2015', u'ALL_BASEP_2015', u'ALL_ASMA_2015',
# u'ALL_Clca_Colca_2015' | mit |
mxjl620/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
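# Illustrative call (sketch, not executed at import time): Y is the noisy data
# matrix, U the latent codes and V the sparse spatial components, e.g.
#
#   Y, U, V = generate_toy_data(3, 10, (8, 8), random_state=0)
#   Y.shape   # (10, 64)
#   V.shape   # (3, 64)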
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
google-research/google-research | smu/parser/smu_utils_lib_test.py | 1 | 35529 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
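# Minimal illustration of the helper above (hypothetical; the tests below use
# the same pattern with longer topology strings):
#
#   bt = str_to_bond_topology('atoms: ATOM_O atoms: ATOM_O')
#   len(bt.atoms)   # -> 2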
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'),
899650)
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'),
899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=['name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1',
'name2', 'stoich2', 'btid2', 'shortconfid2', 'confid2'],
data=[
['x07_c2n2o2fh3.224227.004',
'c2n2o2fh3', 224227, 4, 224227004,
'x07_c2n2o2fh3.224176.005',
'c2n2o2fh3', 224176, 5, 224176005],
['x07_c2n2o2fh3.260543.005',
'c2n2o2fh3', 260543, 5, 260543005,
'x07_c2n2o2fh3.224050.001',
'c2n2o2fh3', 224050, 1, 224050001],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
    # the id. Through the dumb luck of the molecule we picked, there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
    # These are just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
    # This is expected. Even with include_hs=True, if there are no Hs in the
    # molecule, they will not appear in the SMILES.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
    # and x07_stage1.dat is the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by = 123
self.duplicate_conformer.duplicate_of.extend([111, 222])
def test_two_stage2(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage2_conformer)
def test_two_stage1(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage1_conformer)
def test_two_duplicates(self):
duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)
duplicate_conformer2.duplicate_of[:] = [333, 444]
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.duplicate_conformer, duplicate_conformer2)
self.assertIsNone(got_conflict)
self.assertEqual(123, got_conf.duplicated_by)
self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)
def test_stage2_stage1(self):
# Add a duplicate to stage1 to make sure it is copied
self.stage1_conformer.duplicate_of.append(999)
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [999])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_energy(self):
self.stage2_conformer.properties.initial_geometry_energy.value = -1.23
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
    # The stage2 value should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)
def test_stage2_stage1_conflict_error_codes(self):
self.stage2_conformer.properties.errors.error_nstat1 = 999
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
999, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_missing_geometry(self):
self.stage2_conformer.ClearField('optimized_geometry')
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, False
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_no_conflict_minus1(self):
# If stage2 contains a -1, we keep that (stricter error checking later on)
self.stage2_conformer.properties.initial_geometry_energy.value = -1.0
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)
def test_stage2_stage1_no_conflict_approx_equal(self):
self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage1_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage1_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))
def test_multiple_initial_geometries(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_multiple_bond_topologies(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_different_bond_topologies(self):
self.stage1_conformer.bond_topologies[0].atoms[0] = (
dataset_pb2.BondTopology.ATOM_H)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage1_conformer)
class ConformerErrorTest(absltest.TestCase):
def test_stage1_no_error(self):
conformer = get_stage1_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage1_error(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_frequencies = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_no_error(self):
conformer = get_stage2_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_1_expected_field(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_rotational_modes = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_0_expected_field(self):
conformer = get_stage2_conformer()
# This field is 0 to indicate no error. Why the discrepancy? Who knows!
conformer.properties.errors.error_nsvg09 = 1
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_nstat1_is_3(self):
    # This is the other bizarre case. nstat1 of 3 is still considered success.
conformer = get_stage2_conformer()
conformer.properties.errors.error_nstat1 = 3
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
class FilterConformerByAvailabilityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = dataset_pb2.Conformer()
properties = self.conformer.properties
# A STANDARD field
properties.single_point_energy_pbe0d3_6_311gd.value = 1.23
# A COMPLETE field
properties.homo_pbe0_aug_pc_1.value = 1.23
# An INTERNAL_ONLY field
properties.nuclear_repulsion_energy.value = 1.23
def test_standard(self):
smu_utils_lib.filter_conformer_by_availability(self.conformer,
[dataset_pb2.STANDARD])
self.assertTrue(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertFalse(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
def test_complete_and_internal_only(self):
smu_utils_lib.filter_conformer_by_availability(
self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])
self.assertFalse(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertTrue(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertTrue(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
class ConformerToStandardTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_field_filtering(self):
# Check that the field which should be filtered starts out set
self.assertTrue(self.conformer.properties.HasField(
'single_point_energy_hf_6_31gd'))
got = smu_utils_lib.conformer_to_standard(self.conformer)
# Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(
got.properties.HasField('single_point_energy_hf_6_31gd'))
def test_remove_error_conformer(self):
self.conformer.properties.errors.error_frequencies = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
def test_remove_duplicate(self):
self.conformer.duplicated_by = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
class DetermineFateTest(parameterized.TestCase):
def test_duplicate_same_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
def test_duplicate_different_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1000
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),
(5, dataset_pb2.Conformer.FATE_DISASSOCIATED),
(4, dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),
(6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))
def test_geometry_failures(self, nstat1, expected_fate):
conformer = get_stage1_conformer()
conformer.properties.errors.error_nstat1 = nstat1
self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))
def test_no_result(self):
conformer = get_stage1_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,
smu_utils_lib.determine_fate(conformer))
def test_calculation_errors(self):
conformer = get_stage2_conformer()
# This is a random choice of an error to set. I just need some error.
conformer.properties.errors.error_atomic_analysis = 999
self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR,
smu_utils_lib.determine_fate(conformer))
def test_success(self):
conformer = get_stage2_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,
smu_utils_lib.determine_fate(conformer))
class ToBondTopologySummaryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_dup_same(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_same_topology, 1)
def test_dup_diff(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_different_topology, 1)
def test_geometry_failed(self):
self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_failed_geometry_optimization, 1)
def test_missing_calculation(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_kept_geometry, 1)
self.assertEqual(got[0].count_missing_calculation, 1)
def test_calculation_with_error(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_with_error, 0)
self.assertEqual(got[0].count_detected_match_with_error, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_with_error, 1)
self.assertEqual(got[1].count_detected_match_with_error, 0)
def test_calculation_success(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_success, 0)
self.assertEqual(got[0].count_detected_match_success, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_success, 1)
self.assertEqual(got[1].count_detected_match_success, 0)
class LabeledSmilesTester(absltest.TestCase):
def test_atom_labels(self):
mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)
self.assertIsNotNone(mol)
smiles_before = Chem.MolToSmiles(mol)
self.assertEqual(
smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')
# Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
for atom in mol.GetAtoms():
self.assertEqual(atom.GetAtomMapNum(), 0)
self.assertEqual(Chem.MolToSmiles(mol), smiles_before)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
kgullikson88/TS23-Scripts | CheckSyntheticTemperature.py | 1 | 14868 | import os
import re
from collections import defaultdict
from operator import itemgetter
import logging
import pandas
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from george import kernels
import matplotlib.pyplot as plt
import numpy as np
import george
import emcee
import StarData
import SpectralTypeRelations
def classify_filename(fname, type='bright'):
"""
    Given a CCF filename, extract the star combination, secondary vsini,
    temperature, log(g), and metallicity encoded in the name.
    :param fname: the CCF filename to parse
    :param type: the brightness keyword embedded in the filename (default 'bright')
    :return: tuple of (star1, star2, vsini, temp, logg, metal)
"""
# First, remove any leading directories
fname = fname.split('/')[-1]
# Star combination
m1 = re.search('\.[0-9]+kps', fname)
stars = fname[:m1.start()]
star1 = stars.split('+')[0].replace('_', ' ')
star2 = stars.split('+')[1].split('_{}'.format(type))[0].replace('_', ' ')
# secondary star vsini
vsini = float(fname[m1.start() + 1:].split('kps')[0])
# Temperature
m2 = re.search('[0-9]+\.0K', fname)
temp = float(m2.group()[:-1])
# logg
m3 = re.search('K\+[0-9]\.[0-9]', fname)
logg = float(m3.group()[1:])
# metallicity
metal = float(fname.split(str(logg))[-1])
return star1, star2, vsini, temp, logg, metal
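# Illustrative example (added comment, not from the original script): for a
# hypothetical filename following the pattern parsed above,
#   classify_filename('Star_A+Star_B_bright.20kps_5500.0K+4.5+0.0')
# would return ('Star A', 'Star B', 20.0, 5500.0, 4.5, 0.0).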
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
"""
Searches the given directory for CCF files, and classifies
by star, temperature, metallicity, and vsini
:param basedir: The directory to search for CCF files
:keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
:keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
primary = []
secondary = []
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
if primary_name is not None and star1.lower() != primary_name.lower():
continue
if secondary_name is not None and star2.lower() != secondary_name.lower():
continue
vel, corr = np.loadtxt(fname, unpack=True)
fcn = spline(vel, corr)
ccf.append(fcn(vel_arr))
primary.append(star1)
secondary.append(star2)
vsini_values.append(vsini)
temperature.append(temp)
gravity.append(logg)
metallicity.append(metal)
# Make a pandas dataframe with all this data
df = pandas.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
return df
def get_ccf_summary(basedir, vel_arr=np.arange(-900.0, 900.0, 0.1), velocity='highest', type='bright'):
"""
Very similar to get_ccf_data, but does it in a way that is more memory efficient
:param basedir: The directory to search for CCF files
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
file_dict = defaultdict(lambda: defaultdict(list))
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
file_dict[star1][star2].append(fname)
# Now, read the ccfs for each primary/secondary combo, and find the best combination
summary_dfs = []
for primary in file_dict.keys():
for secondary in file_dict[primary].keys():
data = get_ccf_data(basedir, primary_name=primary, secondary_name=secondary,
vel_arr=vel_arr, type=type)
summary_dfs.append(find_best_pars(data, velocity=velocity, vel_arr=vel_arr))
return pandas.concat(summary_dfs, ignore_index=True)
def find_best_pars(df, velocity='highest', vel_arr=np.arange(-900.0, 900.0, 0.1)):
"""
Find the 'best-fit' parameters for each combination of primary and secondary star
:param df: the dataframe to search in
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: a dataframe with keys of primary, secondary, and the parameters
"""
# Get the names of the primary and secondary stars
primary_names = pandas.unique(df.Primary)
secondary_names = pandas.unique(df.Secondary)
# Find the ccf value at the given velocity
if velocity == 'highest':
fcn = lambda row: (np.max(row), vel_arr[np.argmax(row)])
vals = df['CCF'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# df['ccf_max'] = df['CCF'].map(np.max)
else:
df['ccf_max'] = df['CCF'].map(lambda arr: arr[np.argmin(np.abs(vel_arr - velocity))])
# Find the best parameter for each combination
d = defaultdict(list)
for primary in primary_names:
for secondary in secondary_names:
good = df.loc[(df.Primary == primary) & (df.Secondary == secondary)]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(best['Temperature'].item())
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
return pandas.DataFrame(data=d)
def get_detected_objects(df, tol=1.0):
"""
Takes a summary dataframe with RV information. Finds the median rv for each star,
    and removes objects that are more than 'tol' km/s away from the median value
:param df: A summary dataframe, such as created by find_best_pars
:param tol: The tolerance, in km/s, to accept an observation as detected
:return: a dataframe containing only detected companions
"""
secondary_names = pandas.unique(df.Secondary)
secondary_to_rv = defaultdict(float)
for secondary in secondary_names:
rv = df.loc[df.Secondary == secondary]['rv'].median()
secondary_to_rv[secondary] = rv
print secondary, rv
keys = df.Secondary.values
good = df.loc[abs(df.rv.values - np.array(itemgetter(*keys)(secondary_to_rv))) < tol]
return good
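# Illustrative example (added comment, hypothetical numbers): if one secondary
# star has measured rv values of [10.1, 10.3, 55.0, 9.9] km/s, the median is
# 10.2 km/s, so with tol=1.0 the 55.0 km/s observation is rejected while the
# other three are kept as detections.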
def add_actual_temperature(df, method='spt'):
"""
Add the actual temperature to a given summary dataframe
:param df: The dataframe to which we will add the actual secondary star temperature
:param method: How to get the actual temperature. Options are:
- 'spt': Use main-sequence relationships to go from spectral type --> temperature
- 'excel': Use tabulated data, available in the file 'SecondaryStar_Temperatures.xls'
    :return: None; the dataframe is modified in place, gaining 'Tactual' and 'Tact_err' columns
"""
# First, get a list of the secondary stars in the data
secondary_names = pandas.unique(df.Secondary)
secondary_to_temperature = defaultdict(float)
secondary_to_error = defaultdict(float)
if method.lower() == 'spt':
MS = SpectralTypeRelations.MainSequence()
for secondary in secondary_names:
star_data = StarData.GetData(secondary)
spt = star_data.spectype[0] + re.search('[0-9]\.*[0-9]*', star_data.spectype).group()
T_sec = MS.Interpolate(MS.Temperature, spt)
secondary_to_temperature[secondary] = T_sec
elif method.lower() == 'excel':
table = pandas.read_excel('SecondaryStar_Temperatures.xls', 0)
for secondary in secondary_names:
T_sec = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())]['Literature_Temp'].item()
T_error = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())][
'Literature_error'].item()
secondary_to_temperature[secondary] = T_sec
secondary_to_error[secondary] = T_error
df['Tactual'] = df['Secondary'].map(lambda s: secondary_to_temperature[s])
df['Tact_err'] = df['Secondary'].map(lambda s: secondary_to_error[s])
return
def make_gaussian_process_samples(df):
"""
Make a gaussian process fitting the Tactual-Tmeasured relationship
:param df: pandas DataFrame with columns 'Temperature' (with the measured temperature)
and 'Tactual' (for the actual temperature)
:return: emcee sampler instance
"""
    # First, find the mean actual temperature and its scatter at each measured temperature
# Tactual = df['Tactual'].values
#Tmeasured = df['Temperature'].values
#error = df['Tact_err'].values
temp = df.groupby('Temperature').mean()['Tactual']
Tmeasured = temp.keys().values
Tactual = temp.values
error = np.nan_to_num(df.groupby('Temperature').std(ddof=1)['Tactual'].values)
default = np.median(error[error > 1])
error = np.maximum(error, np.ones(error.size) * default)
for Tm, Ta, e in zip(Tmeasured, Tactual, error):
print Tm, Ta, e
plt.figure(1)
plt.errorbar(Tmeasured, Tactual, yerr=error, fmt='.k', capsize=0)
plt.plot(Tmeasured, Tmeasured, 'r--')
plt.xlim((min(Tmeasured) - 100, max(Tmeasured) + 100))
plt.xlabel('Measured Temperature')
plt.ylabel('Actual Temperature')
plt.show(block=False)
# Define some functions to use in the GP fit
def model(pars, T):
#polypars = pars[2:]
#return np.poly1d(polypars)(T)
return T
def lnlike(pars, Tact, Tmeas, Terr):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeas, Terr)
return gp.lnlikelihood(Tact - model(pars, Tmeas))
def lnprior(pars):
lna, lntau = pars[:2]
polypars = pars[2:]
if -20 < lna < 20 and 4 < lntau < 20:
return 0.0
return -np.inf
def lnprob(pars, x, y, yerr):
lp = lnprior(pars)
return lp + lnlike(pars, x, y, yerr) if np.isfinite(lp) else -np.inf
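    # Note (added comment): with model(pars, T) = T, the Gaussian process is
    # fit to the residual Tactual - Tmeasured using a squared-exponential
    # kernel whose amplitude and scale are the two hyperparameters sampled
    # (in log space) by emcee below.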
# Set up the emcee fitter
initial = np.array([0, 6])#, 1.0, 0.0])
ndim = len(initial)
nwalkers = 100
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(Tactual, Tmeasured, error))
print 'Running first burn-in'
p1, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print "Running second burn-in..."
p_best = p1[np.argmax(lnp)]
p2 = [p_best + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p3, _, _ = sampler.run_mcmc(p2, 250)
sampler.reset()
print "Running production..."
sampler.run_mcmc(p3, 1000)
# Plot a bunch of the fits
print "Plotting..."
N = 100
Tvalues = np.arange(3300, 7000, 20)
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
for i, pars in enumerate(par_vals):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
plt.plot(Tvalues, s, 'b-', alpha=0.1)
plt.draw()
# Finally, get posterior samples at all the possibly measured temperatures
print 'Generating posterior samples at all temperatures...'
N = 10000 # This is 1/10th of the total number of samples!
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
Tvalues = np.arange(3000, 6900, 100)
gp_posterior = []
for pars in par_vals:
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
gp_posterior.append(s)
# Finally, make confidence intervals for the actual temperatures
gp_posterior = np.array(gp_posterior)
l, m, h = np.percentile(gp_posterior, [16.0, 50.0, 84.0], axis=0)
conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
'Lower Bound': l, 'Upper bound': h})
conf.to_csv('Confidence_Intervals.csv', index=False)
return sampler, np.array(gp_posterior)
def check_posterior(df, posterior, Tvalues):
"""
Checks the posterior samples: Are 95% of the measurements within 2-sigma of the prediction?
:param df: The summary dataframe
:param posterior: The MCMC predicted values
:param Tvalues: The measured temperatures the posterior was made with
:return: boolean, as well as some warning messages if applicable
"""
# First, make 2-sigma confidence intervals
l, m, h = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
# Save the confidence intervals
# conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
# 'Lower Bound': l, 'Upper bound': h})
#conf.to_csv('Confidence_Intervals.csv', index=False)
Ntot = [] # The total number of observations with the given measured temperature
Nacc = [] # The number that have actual temperatures within the confidence interval
g = df.groupby('Temperature')
for i, T in enumerate(Tvalues):
if T in g.groups.keys():
Ta = g.get_group(T)['Tactual']
low, high = l[i], h[i]
Ntot.append(len(Ta))
Nacc.append(len(Ta.loc[(Ta >= low) & (Ta <= high)]))
p = float(Nacc[-1]) / float(Ntot[-1])
if p < 0.95:
logging.warn(
'Only {}/{} of the samples ({:.2f}%) were accepted for T = {} K'.format(Nacc[-1], Ntot[-1], p * 100,
T))
print low, high
print sorted(Ta)
else:
Ntot.append(0)
Nacc.append(0)
p = float(sum(Nacc)) / float(sum(Ntot))
if p < 0.95:
logging.warn('Only {:.2f}% of the total samples were accepted!'.format(p * 100))
return False
return True
if __name__ == '__main__':
pass
| gpl-3.0 |
codester2/devide.johannes | install_packages/ip_matplotlib.py | 5 | 5932 | # Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import sys
import utils
from distutils import sysconfig
MPL_VER = "1.1.0"
if os.name == 'posix':
MPL_ARCHIVE = "matplotlib-%s.tar.gz" % (MPL_VER,)
MPL_URL = "http://surfnet.dl.sourceforge.net/sourceforge/matplotlib/%s" % \
(MPL_ARCHIVE,)
elif os.name == 'nt':
if config.WINARCH_STR == 'x64':
WINTHINGY = 'win-amd64'
else:
WINTHINGY = 'win32'
MPL_ARCHIVE = "matplotlib-%s.%s-py2.7.exe" % (MPL_VER, WINTHINGY)
MPL_URL = "http://graphics.tudelft.nl/~cpbotha/files/devide/johannes_support/gohlke/%s" % (MPL_ARCHIVE,)
MPL_DIRBASE = "matplotlib-%s" % (MPL_VER,)
# I prefer that this be built with numpy, but it is not a dependency
# per se
dependencies = []
class matplotlib(InstallPackage):
def __init__(self):
self.tbfilename = os.path.join(config.archive_dir, MPL_ARCHIVE)
self.build_dir = os.path.join(config.build_dir, MPL_DIRBASE)
self.inst_dir = os.path.join(config.inst_dir, 'matplotlib')
def get(self):
if os.path.exists(self.tbfilename):
utils.output("%s already present, not downloading." %
(MPL_ARCHIVE,))
else:
utils.goto_archive()
utils.urlget(MPL_URL)
def unpack(self):
if os.path.isdir(self.build_dir):
utils.output("MATPLOTLIB source already unpacked, not redoing.")
else:
if os.name == 'posix':
utils.output("Unpacking MATPLOTLIB source.")
utils.unpack_build(self.tbfilename)
else:
utils.output("Unpacking MATPLOTLIB binaries.")
os.mkdir(self.build_dir)
os.chdir(self.build_dir)
utils.unpack(self.tbfilename)
def configure(self):
if os.name == 'nt':
utils.output("Skipping configure (WINDOWS).")
return
# pre-configure setup.py and setupext.py so that everything is
# found and configured as we want it.
os.chdir(self.build_dir)
if os.path.exists('setup.py.new'):
utils.output('matplotlib already configured. Skipping step.')
else:
# pre-filter setup.py
repls = [("(BUILD_GTKAGG\s*=\s*).*", "\\1 0"),
("(BUILD_GTK\s*=\s*).*", "\\1 0"),
("(BUILD_TKAGG\s*=\s*).*", "\\1 0"),
("(BUILD_WXAGG\s*=\s*).*", "\\1 1"),
("(rc\s*=\s*dict\().*",
"\\1 {'backend':'PS', 'numerix':'numpy'} )")]
utils.re_sub_filter_file(repls, 'setup.py')
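            # Note (added comment): the substitutions above turn off the
            # GTKAgg/GTK/TkAgg backends, turn on WXAgg, and force the default
            # rc to the PS backend with numpy as the numerix package before
            # matplotlib's setup.py runs.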
def build(self):
if os.name == 'nt':
utils.output("Skipping build (WINDOWS).")
return
os.chdir(self.build_dir)
# weak test... there are .so files deeper, but they're in platform
# specific directories
if os.path.exists('build'):
utils.output('matplotlib already built. Skipping step.')
else:
# add wx bin to path so that wx-config can be found
os.environ['PATH'] = "%s%s%s" % (config.WX_BIN_PATH,
os.pathsep, os.environ['PATH'])
ret = os.system('%s setup.py build' % (sys.executable,))
if ret != 0:
utils.error('matplotlib build failed. Please fix and try again.')
def install(self):
# to test for install, just do python -c "import matplotlib"
# and test the result (we could just import directly, but that would
# only work once our invoking python has been stopped and started
# again)
os.chdir(config.archive_dir) # we need to be elsewhere!
ret = os.system('%s -c "import matplotlib"' % (sys.executable,))
if ret == 0:
utils.output('matplotlib already installed. Skipping step.')
else:
utils.output('ImportError test shows that matplotlib is not '
'installed. Installing...')
if os.name == 'nt':
self.install_nt()
else:
self.install_posix()
# make sure the backend is set to WXAgg
# and that interactive is set to True
rcfn = os.path.join(
config.PYTHON_SITE_PACKAGES,
'matplotlib', 'mpl-data', 'matplotlibrc')
utils.re_sub_filter_file(
[("(\s*backend\s*\:).*", "\\1 WXAgg"),
("#*(\s*interactive\s:).*","\\1 True")], rcfn)
def install_nt(self):
sp_dir = sysconfig.get_python_lib()
utils.copy_glob(os.path.join(self.build_dir, 'PLATLIB', '*'), sp_dir)
def install_posix(self):
os.chdir(self.build_dir)
# add wx bin to path so that wx-config can be found
os.environ['PATH'] = "%s%s%s" % (config.WX_BIN_PATH,
os.pathsep, os.environ['PATH'])
ret = os.system('%s setup.py install' % (sys.executable,))
if ret != 0:
utils.error(
'matplotlib install failed. Please fix and try again.')
def clean_build(self):
utils.output("Removing build and install directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
from distutils import sysconfig
matplotlib_instdir = os.path.join(sysconfig.get_python_lib(),
'matplotlib')
if os.path.exists(matplotlib_instdir):
shutil.rmtree(matplotlib_instdir)
def get_installed_version(self):
import matplotlib
return matplotlib.__version__
| bsd-3-clause |
vshtanko/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
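# Note (added comment, not part of the original example): each direction
# vector above acts as a convolution kernel that translates the 8x8 image by
# one pixel along one axis (with zero padding at the border), so the stacked
# result is the original digits plus four shifted copies.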
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
nixingyang/Kaggle-Competitions | TalkingData AdTracking Fraud Detection/perform_ensembling.py | 1 | 2489 | import os
import glob
import shutil
import datetime
import numpy as np
import pandas as pd
# Dataset
PROJECT_NAME = "TalkingData AdTracking Fraud Detection"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset",
PROJECT_NAME)
# Submission
TEAM_NAME = "Aurora"
SUBMISSION_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "submission")
os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
# Ensembling
WORKSPACE_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "script/Mar_25_3")
KEYWORD = "DL"
# Generate a zip archive for a file
create_zip_archive = lambda file_path: shutil.make_archive(
file_path[:file_path.rindex(".")], "zip",
os.path.abspath(os.path.join(file_path, "..")), os.path.basename(file_path))
def run():
print("Searching for submissions with keyword {} at {} ...".format(
KEYWORD, WORKSPACE_FOLDER_PATH))
submission_file_path_list = sorted(
glob.glob(os.path.join(WORKSPACE_FOLDER_PATH, "*{}*".format(KEYWORD))))
assert len(submission_file_path_list) != 0
ranking_array_list = []
for submission_file_path in submission_file_path_list:
print("Loading {} ...".format(submission_file_path))
submission_df = pd.read_csv(submission_file_path)
print("Ranking the entries ...")
index_series = submission_df["is_attributed"].argsort()
ranking_array = np.zeros(index_series.shape, dtype=np.uint32)
ranking_array[index_series] = np.arange(len(index_series))
ranking_array_list.append(ranking_array)
ensemble_df = submission_df.copy()
ensemble_prediction_array = np.mean(ranking_array_list, axis=0)
apply_normalization = lambda data_array: 1.0 * (data_array - np.min(
data_array)) / (np.max(data_array) - np.min(data_array))
ensemble_df["is_attributed"] = apply_normalization(
ensemble_prediction_array)
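    # Illustrative note (added comment, not part of the original script): the
    # rank transform above replaces each score by its position in sorted
    # order, e.g. scores [0.9, 0.1, 0.5] -> ranks [2, 0, 1]. Averaging ranks
    # across submissions and min-max rescaling to [0, 1] makes the ensemble
    # insensitive to differences in score calibration between models.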
ensemble_file_path = os.path.join(
SUBMISSION_FOLDER_PATH, "{} {} {}.csv".format(
TEAM_NAME, KEYWORD,
str(datetime.datetime.now()).split(".")[0]).replace(" ", "_"))
print("Saving submission to {} ...".format(ensemble_file_path))
ensemble_df.to_csv(ensemble_file_path, float_format="%.6f", index=False)
compressed_ensemble_file_path = create_zip_archive(ensemble_file_path)
print("Saving compressed submission to {} ...".format(
compressed_ensemble_file_path))
print("All done!")
if __name__ == "__main__":
run()
| mit |
flennerhag/mlens | mlens/externals/sklearn/validation.py | 1 | 27114 | """
Scikit-learn utilities for input validation.
"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from .. import six
from ...utils.exceptions import NotFittedError, NonBLASDotWarning, \
DataConversionWarning
try:
from inspect import signature
except ImportError:
from mlens.externals.funcsigs import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : array or sparse matrix
"""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError("Found input variables with inconsistent numbers of"
" samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : string, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : boolean
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if isinstance(accept_sparse, six.string_types):
accept_sparse = [accept_sparse]
if accept_sparse is False:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError("When providing 'accept_sparse' "
"as a tuple or list, it must contain at "
"least one string value.")
# ensure correct sparse format
if spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError("Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided "
"'accept_sparse={}'.".format(accept_sparse))
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, boolean or list/tuple of strings (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to raise a value error if X is not 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
# accept_sparse 'None' deprecation check
if accept_sparse is None:
warnings.warn(
"Passing 'None' to parameter 'accept_sparse' in methods "
"check_array and check_X_y is deprecated in version 0.19 "
"and will be removed in 0.21. Use 'accept_sparse=False' "
" instead.", DeprecationWarning)
accept_sparse = False
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
raise ValueError(
"Expected 2D array, got 1D array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array))
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning)
return array
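# Hedged example (added for illustration, values are arbitrary): check_array
# promotes a nested list to a 2D numeric array and rejects 1D input while
# ensure_2d is left at its default of True.
def _example_check_array():
    arr = check_array([[1, 2], [3, 4]], dtype="numeric")
    assert arr.shape == (2, 2)
    try:
        check_array([1, 2, 3])      # 1D input -> ValueError with a reshape hint
    except ValueError:
        pass
    return arr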
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
    For y, only standard input checks are applied, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, boolean or list of string (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
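# Hypothetical sketch (not from the original source): check_X_y validating a tiny
# regression-style dataset; the column-vector y is ravelled to 1D (with a
# DataConversionWarning) and X/y lengths are checked.
def _example_check_X_y():
    X_checked, y_checked = check_X_y([[0.0, 1.0], [2.0, 3.0]], [[1.0], [2.0]],
                                     y_numeric=True)
    return X_checked.shape, y_checked.shape     # ((2, 2), (2,))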
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
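# Hedged usage sketch (added for illustration): the three accepted seed kinds all
# resolve to a RandomState instance that downstream code can rely on.
def _example_check_random_state():
    import numpy as np
    global_rng = check_random_state(None)       # module-level singleton
    seeded_rng = check_random_state(0)          # fresh RandomState(0)
    same_rng = check_random_state(seeded_rng)   # passed through unchanged
    assert isinstance(global_rng, np.random.RandomState)
    assert same_rng is seeded_rng
    return seeded_rng.randint(10)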
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter: str
The searched parameter.
Returns
-------
is_parameter: bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
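# Hypothetical example (not from the original source): a slightly asymmetric
# matrix is symmetrized by averaging with its transpose; a warning is emitted
# because raise_warning defaults to True.
def _example_check_symmetric():
    import numpy as np
    a = np.array([[0.0, 1.0], [1.0 + 1e-6, 0.0]])
    a_sym = check_symmetric(a, tol=1e-10)
    assert np.allclose(a_sym, a_sym.T)
    return a_sym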
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg.:
``["coef_", "estimator_", ...], "coef_"``
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
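# Illustrative sketch (added): check_is_fitted only looks for the presence of the
# given attribute(s), so the tiny hypothetical estimator below is enough to show
# both outcomes.
def _example_check_is_fitted():
    class _TinyEstimator(object):
        def fit(self, X, y=None):
            self.coef_ = 0.0
            return self
    est = _TinyEstimator()
    try:
        check_is_fitted(est, 'coef_')            # raises NotFittedError before fit
    except NotFittedError:
        pass
    check_is_fitted(est.fit(None), 'coef_')      # passes silently after fit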
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| mit |
matousc89/padasip | padasip/filters/nlmf.py | 1 | 5444 | """
.. versionadded:: 1.1.0
The normalized least-mean-fourth (NLMF) adaptive filter is implemented
according to the paper :cite:`zerguine2000convergence`. The NLMF is an
extension of the LMF adaptive filter (:ref:`filter-lmf`).
The NLMF filter can be created as follows
>>> import padasip as pa
>>> pa.filters.FilterNLMF(n)
where `n` is the size (number of taps) of the filter.
Content of this page:
.. contents::
:local:
:depth: 1
.. seealso:: :ref:`filters`
Algorithm Explanation
======================================
The NLMF is an extension of the LMF filter. See :ref:`filter-lmf`
for an explanation of the underlying algorithm.
The extension is based on normalization of the learning rate.
The learning rate :math:`\mu` is replaced by a learning rate :math:`\eta(k)`
that is normalized for every new sample according to the input power as follows
:math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,
where :math:`|| \\textbf{x}(k) ||^2` is the norm of the input vector and
:math:`\epsilon` is a small positive constant (regularization term).
This constant is introduced to preserve stability in cases where
the input is close to zero.
Minimal Working Examples
======================================
If you have measured data you may filter it as follows
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
# creation of data
N = 500
x = np.random.normal(0, 1, (N, 4)) # input matrix
v = np.random.normal(0, 0.1, N) # noise
d = 2*x[:,0] + 0.1*x[:,1] - 0.3*x[:,2] + 0.5*x[:,3] + v # target
# identification
f = pa.filters.FilterNLMF(n=4, mu=0.005, w="random")
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
plt.tight_layout()
plt.show()
References
======================================
.. bibliography:: lmf.bib
:style: plain
Code Explanation
======================================
"""
import numpy as np
from padasip.filters.base_filter import AdaptiveFilter
class FilterNLMF(AdaptiveFilter):
"""
Adaptive NLMF filter.
**Args:**
    * `n` : length of the filter (integer) - i.e. the number of entries in each
      input array (row of the input matrix)
**Kwargs:**
* `mu` : learning rate (float). Also known as step size.
      If it is too small,
the filter may have bad performance. If it is too high,
the filter will be unstable. The default value can be unstable
for ill-conditioned input data.
* `eps` : regularization term (float). It is introduced to preserve
stability for close-to-zero input vectors
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
"""
def __init__(self, n, mu=0.1, eps=1., w="random"):
self.kind = "NLMF filter"
if type(n) == int:
self.n = n
else:
raise ValueError('The size of filter must be an integer')
self.mu = self.check_float_param(mu, 0, 1000, "mu")
self.eps = self.check_float_param(eps, 0, 1000, "eps")
self.init_weights(w, self.n)
self.w_history = False
def adapt(self, d, x):
"""
        Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
nu = self.mu / (self.eps + np.dot(x, x))
self.w += nu * x * e**3
def run(self, d, x):
"""
This function filters multiple samples in a row.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
          Every row is the set of weights for a given sample.
"""
        # measure the data and check that the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N,self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
y[k] = np.dot(self.w, x[k])
e[k] = d[k] - y[k]
nu = self.mu / (self.eps + np.dot(x[k], x[k]))
dw = nu * x[k] * e[k]**3
self.w += dw
return y, e, self.w_history
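# A minimal identification sketch (added for illustration, mirroring the module
# docstring example above): adapt the NLMF filter sample-by-sample with `adapt`
# instead of processing a whole batch with `run`. The data below is synthetic.
if __name__ == "__main__":
    n = 4
    N = 200
    x = np.random.normal(0, 1, (N, n))                     # input matrix
    d = 2*x[:, 0] - 0.5*x[:, 2] + np.random.normal(0, 0.1, N)
    f = FilterNLMF(n=n, mu=0.005, w="zeros")
    for k in range(N):
        f.adapt(d[k], x[k])                                # one update per sample
    print(f.w)                                             # should drift toward [2, 0, -0.5, 0]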
| mit |
LevinJ/Supply-demand-forecasting | implement/xgboostmodel.py | 1 | 4070 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from preprocess.preparedata import PrepareData
import numpy as np
from utility.runtype import RunType
from utility.datafilepath import g_singletonDataFilePath
from preprocess.splittrainvalidation import HoldoutSplitMethod
import xgboost as xgb
from evaluation.sklearnmape import mean_absolute_percentage_error_xgboost
from evaluation.sklearnmape import mean_absolute_percentage_error
from utility.modelframework import ModelFramework
from utility.xgbbasemodel import XGBoostGridSearch
from evaluation.sklearnmape import mean_absolute_percentage_error_xgboost_cv
from utility.xgbbasemodel import XGBoostBase
import logging
import sys
class DidiXGBoostModel(XGBoostBase, PrepareData, XGBoostGridSearch):
def __init__(self):
PrepareData.__init__(self)
XGBoostGridSearch.__init__(self)
XGBoostBase.__init__(self)
self.best_score_colname_in_cv = 'test-mape-mean'
self.do_cross_val = False
self.train_validation_foldid = -2
if self.do_cross_val is None:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(logging.StreamHandler(sys.stdout))
root.addHandler(logging.FileHandler('logs/finetune_parameters.log', mode='w'))
return
def set_xgb_parameters(self):
early_stopping_rounds = 3
        self.xgb_params = {'silent': 1, 'colsample_bytree': 0.8, 'lambda ': 1, 'min_child_weight': 1, 'subsample': 0.8, 'eta': 0.01, 'objective': 'reg:linear', 'max_depth': 7}
# self.xgb_params = {'silent':1 }
self.xgb_learning_params = {
'num_boost_round': 200,
'callbacks':[xgb.callback.print_evaluation(show_stdv=True),xgb.callback.early_stop(early_stopping_rounds)],
'feval':mean_absolute_percentage_error_xgboost_cv}
if self.do_cross_val == False:
self.xgb_learning_params['feval'] = mean_absolute_percentage_error_xgboost
return
def get_paramgrid_1(self):
"""
This method must be overriden by derived class when its objective is not reg:linear
"""
param_grid = {'max_depth':[6], 'eta':[0.1], 'min_child_weight':[1],'silent':[1],
'objective':['reg:linear'],'colsample_bytree':[0.8],'subsample':[0.8], 'lambda ':[1]}
return param_grid
def get_paramgrid_2(self, param_grid):
"""
        This method must be overridden by the derived class if it intends to fine-tune parameters
"""
self.ramdonized_search_enable = False
self.randomized_search_n_iter = 150
self.grid_search_display_result = True
param_grid['eta'] = [0.01] #train-mape:-0.448062+0.00334926 test-mape:-0.448402+0.00601761
# param_grid['max_depth'] = [7] #train-mape:-0.363007+0.00454276 test-mape:-0.452832+0.00321641
# param_grid['colsample_bytree'] = [0.8]
param_grid['max_depth'] = range(5,8) #train-mape:-0.363007+0.00454276 test-mape:-0.452832+0.00321641
param_grid['colsample_bytree'] = [0.6,0.8,1.0]
# param_grid['lambda'] = range(1,15)
# param_grid['max_depth'] = [3,4]
# param_grid['eta'] = [0.01,0.1] # 0.459426+0.00518875
# param_grid['subsample'] = [0.5] #0.458935+0.00522205
# param_grid['eta'] = [0.005] #0.457677+0.00526401
return param_grid
def get_learning_params(self):
"""e
This method must be overriden by derived class if it intends to fine tune parameters
"""
num_boost_round = 100
early_stopping_rounds = 5
kwargs = {'num_boost_round':num_boost_round, 'feval':mean_absolute_percentage_error_xgboost_cv,
'callbacks':[xgb.callback.print_evaluation(show_stdv=True),xgb.callback.early_stop(early_stopping_rounds)]}
return kwargs
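# Hedged usage sketch (added for illustration): a by-hand cross-validation call
# with the same MAPE feval wiring as get_learning_params(); `features` and
# `labels` are assumed to be numpy arrays produced elsewhere by PrepareData.
def _example_manual_cv(features, labels):
    dtrain = xgb.DMatrix(features, label=labels)
    params = {'eta': 0.01, 'max_depth': 7, 'subsample': 0.8,
              'colsample_bytree': 0.8, 'objective': 'reg:linear', 'silent': 1}
    return xgb.cv(params, dtrain, num_boost_round=100, nfold=5,
                  feval=mean_absolute_percentage_error_xgboost_cv)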
if __name__ == "__main__":
obj= DidiXGBoostModel()
obj.run() | mit |
anntzer/scikit-learn | sklearn/linear_model/_passive_aggressive.py | 2 | 17363 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from ..utils.validation import _deprecate_positional_args
from ._stochastic_gradient import BaseSGDClassifier
from ._stochastic_gradient import BaseSGDRegressor
from ._stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float, default=1.0
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : integer, default=0
The verbosity level
loss : string, default="hinge"
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
n_jobs : int or None, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
class_weight : dict, {class_label: weight} or "balanced" or None, \
default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
classes_ : array of shape (n_classes,)
The unique classes labels.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
loss_function_ : callable
Loss function used by the algorithm.
Examples
--------
>>> from sklearn.linear_model import PassiveAggressiveClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
... tol=1e-3)
>>> clf.fit(X, y)
PassiveAggressiveClassifier(random_state=0)
>>> print(clf.coef_)
[[0.26642044 0.45070924 0.67251877 0.64185414]]
>>> print(clf.intercept_)
[1.84127814]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
See Also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
@_deprecate_positional_args
def __init__(self, *, C=1.0, fit_intercept=True, max_iter=1000, tol=1e-3,
early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, shuffle=True, verbose=0, loss="hinge",
n_jobs=None, random_state=None, warm_start=False,
class_weight=None, average=False):
super().__init__(
penalty=None,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, max_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
self._validate_params()
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
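# Hedged usage sketch (added for illustration, not part of the original module):
# incremental training with partial_fit on two hypothetical mini-batches; the
# full class list must be supplied on the first call only.
def _example_partial_fit_classifier():
    import numpy as np
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(20, 3), rng.randn(20, 3)
    y1, y2 = np.array([0, 1] * 10), np.array([1, 0] * 10)
    clf = PassiveAggressiveClassifier(random_state=0)
    clf.partial_fit(X1, y1, classes=np.array([0, 1]))    # first call: pass classes
    clf.partial_fit(X2, y2)                              # later calls: classes omitted
    return clf.predict(X2[:3])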
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float, default=1.0
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate
training when validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : integer, default=0
The verbosity level
loss : string, default="epsilon_insensitive"
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
epsilon : float, default=0.1
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
random_state : int, RandomState instance, default=None
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
Examples
--------
>>> from sklearn.linear_model import PassiveAggressiveRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, random_state=0)
>>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
... tol=1e-3)
>>> regr.fit(X, y)
PassiveAggressiveRegressor(max_iter=100, random_state=0)
>>> print(regr.coef_)
[20.48736655 34.18818427 67.59122734 87.94731329]
>>> print(regr.intercept_)
[-0.02306214]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-0.02306214]
See Also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
@_deprecate_positional_args
def __init__(self, *, C=1.0, fit_intercept=True, max_iter=1000, tol=1e-3,
early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, shuffle=True, verbose=0,
loss="epsilon_insensitive", epsilon=DEFAULT_EPSILON,
random_state=None, warm_start=False,
average=False):
super().__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, max_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
self._validate_params()
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
cloud-fan/spark | python/pyspark/pandas/data_type_ops/base.py | 1 | 12265 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
UserDefinedType,
)
from pyspark.pandas.typedef import Dtype, extension_dtypes
from pyspark.pandas.typedef.typehints import extension_object_dtypes_available
if extension_object_dtypes_available:
from pandas import BooleanDtype
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
def transform_boolean_operand_to_numeric(
operand: Any, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
return operand.spark.transform(lambda scol: scol.cast(spark_type))
elif isinstance(operand, bool):
return int(operand)
else:
return operand
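# Illustrative sketch (added for clarity): for plain Python operands the helper
# only rewrites boolean literals; everything else is returned unchanged. The
# IndexOpsMixin branch additionally needs a Spark column and is not shown here.
def _example_transform_boolean_operand_to_numeric():
    assert transform_boolean_operand_to_numeric(True) == 1
    assert transform_boolean_operand_to_numeric(False) == 0
    assert transform_boolean_operand_to_numeric(2.5) == 2.5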
def _as_categorical_type(
index_ops: Union["Series", "Index"], dtype: CategoricalDtype, spark_type: DataType
) -> Union["Index", "Series"]:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = F.lit(-1)
else:
kvs = chain(
*[(F.lit(category), F.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol.getItem(index_ops.spark.column), F.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type).alias(index_ops._internal.data_fields[0].name),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
def _as_bool_type(
index_ops: Union["Series", "Index"], dtype: Union[str, type, Dtype]
) -> Union["Index", "Series"]:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(BooleanType())
else:
scol = F.when(index_ops.spark.column.isNull(), F.lit(False)).otherwise(
index_ops.spark.column.cast(BooleanType())
)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_string_type(
index_ops: Union["Series", "Index"],
dtype: Union[str, type, Dtype],
*,
null_str: str = str(None)
) -> Union["Index", "Series"]:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column.
"""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(StringType())
else:
casted = index_ops.spark.column.cast(StringType())
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_other_type(
index_ops: Union["Series", "Index"], dtype: Union[str, type, Dtype], spark_type: DataType
) -> Union["Index", "Series"]:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType):
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import IntegralOps, FractionalOps
from pyspark.pandas.data_type_ops.string_ops import StringOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, FractionalType):
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def __or__(self, left, right) -> Union["Series", "Index"]:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left, right) -> Union["Series", "Index"]:
return left.__and__(right)
def ror(self, left, right) -> Union["Series", "Index"]:
return left.__or__(right)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def astype(
self, index_ops: Union["Index", "Series"], dtype: Union[str, type, Dtype]
) -> Union["Index", "Series"]:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
| apache-2.0 |
Vimos/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
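# Hypothetical sketch (added for illustration): generate a tiny dataset and time a
# single SparseRandomProjection fit/transform without going through the option
# parser below. The sizes are arbitrary.
def _example_bench_single_transformer():
    dense, sparse = make_sparse_random_data(n_samples=100, n_features=1000,
                                            n_nonzeros=5000, random_state=13)
    rp = SparseRandomProjection(n_components=50, random_state=13)
    fit_time, transform_time = bench_scikit_transformer(dense, rp)
    print_row("SparseRandomProjection", fit_time, transform_time)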
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
                transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
jcchin/MagnePlane | src/hyperloop/Python/ticket_cost.py | 4 | 8796 | from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp
import matplotlib.pylab as plt
class TicketCost(Component):
'''
Notes
-------
    This Component takes into account various cost figures from the system model and combines them to estimate ticket cost per passenger.
Params
-------
    land_cost : float
        Cost of materials over land per unit length. Default value is 2.437e6 USD/km
    water_cost : float
        Cost of materials underwater per unit length. Default value is 389.346941e3 USD/km
pod_cost : float
Cost per individual pod. Default value is 1.0e6 USD.
capital_cost : float
Estimate of overhead capital cost. Default value is 1.0e10 USD.
energy_cost : float
Cost of electricity. Default value is .13 USD/kWh
ib : float
Bond interest rate. Default value is .04
bm : float
Bond maturity. Default value is 20.0 years.
operating_time : float
operating time per day. Default value is 16.0*3600 s
JtokWh : float
Convert J to kWh. Default value is 2.7778e-7 kWh/J
m_pod : float
Pod mass. Default value is 3100 kg
n_passengers : float
Number of passengers. Default value is 28.0
pod_period : float
Time in between pod departures. Default value is 120.0 s
avg_speed : float
average pod speed. Default value is 286.86 m/s
track_length : float
length of the track. Default value is 600e3 m
pod_power : float
Power consumption of the pod. Default value is 1.5e6 W
prop_power : float
power of an individual propulsion section. Default value is 350e3 W
vac_power : float
Power of the vacuum pumps. Default value is 71.049e6 W
alpha : float
percent of vacuum power used in steady state. Default value is .0001
vf : float
Pod top speed. Default value is 286.86 m/s
g : float
Gravity. Default value is 9.81 m/s/s
Cd : float
Pod drag coefficient. Default value is .2
S : float
Pod planform area. Default value is 40.42 m**2
p_tunnel : float
Tunnel pressure. Default value is 850.0 Pa
T_tunnel : float
Tunnel temperature. Default value is 320 K
R : float
Ideal gas constant. Default value is 287 J/kg/K
eta : float
Efficiency of propulsion system
D_mag : float
Magnetic drag. Default value is (9.81*3100.0)/200.0 N
thrust_time : float
Time spent during a propulsive section. Default value is 1.5 s
prop_period : float
distance between propulsion sections. Default value is 25.0e3 m
Returns
-------
ticket_cost : float
cost of individual ticket. Default value is 0.0 USD
prop_energy_cost : float
cost of energy used by propulsion section per year. Default value is 0.0 USD
'''
def __init__(self):
super(TicketCost, self).__init__()
self.add_param('land_cost', val = 2.437e6, desc = 'Cost of materials over land per unit length', units = 'USD/km')
self.add_param('water_cost', val = 389.346941e3, desc = 'Cost of materials underwater per unit length', units = 'USD/km')
self.add_param('pod_cost', val = 1.0e6, desc = 'Cost of individual pod', units = 'USD')
self.add_param('capital_cost', val = 1.0e10, desc = 'Estimate of overhead capital cost', units = 'USD')
self.add_param('energy_cost', val = .13, desc = 'Cost of electricity', units = 'USD/kW/h')
self.add_param('ib', val = .04, desc = 'Bond interest rate', units = 'unitless')
self.add_param('bm', val = 20.0, desc = 'Bond maturity', units = 'yr')
self.add_param('operating_time', val = 16.0*3600, desc = 'Operating time per day', units = 's')
self.add_param('JtokWh', val = 2.7778e-7, desc = 'Convert Joules to kWh', units = '(kw*h)/J')
self.add_param('m_pod', val = 3100.0, desc = 'Pod Mass', units = 'kg')
self.add_param('n_passengers', val = 28.0, desc = 'number of passengers', units = 'unitless')
self.add_param('pod_period', val = 120.0, desc = 'Time in between departures', units = 's')
self.add_param('avg_speed', val = 286.86, desc = 'Average Pod Speed', units = 'm/s')
self.add_param('track_length', val = 600.0e3, desc = 'Track Length', units = 'm')
self.add_param('land_length', val = 600e3, desc = 'Length traveled over land', units = 'm')
self.add_param('water_length', val = 0.0e3, desc = 'Length traveled underwater', units = 'm')
self.add_param('pod_power', val = 1.5e6, desc = 'Power required by pod motor', units = 'W')
self.add_param('prop_power', val = 350.0e3, desc = 'Power of single propulsive section', units = 'W')
self.add_param('vac_power', val = 71.049e6, desc = 'Power of vacuums', units = 'W')
self.add_param('steady_vac_power', val = 950.0e3, desc = 'Steady State run power of vacuum pumps', units = 'W')
self.add_param('vf', val = 286.86, desc = 'Pod top speed', units = 'm/s')
self.add_param('g', val = 9.81, desc = 'Gravity', units = 'm/s/s')
self.add_param('Cd', val = .2, desc = 'Pod drag coefficient', units = 'unitless')
self.add_param('S', val = 40.42, desc = 'Pod planform area', units = 'm**2')
self.add_param('p_tunnel', val = 850.0, desc = 'Tunnel Pressure', units = 'Pa')
self.add_param('T_tunnel', val = 320.0, desc = 'Tunnel Temperature', units = 'K')
self.add_param('R', val = 287.0, desc = 'Ideal gas constant', units = 'J/kg/K')
self.add_param('eta', val = .8, desc = 'Propulsive efficiency', units = 'unitless')
self.add_param('D_mag', val = (9.81*3100.0)/200.0, desc = 'Magnetic Drag', units = 'N')
self.add_param('thrust_time', val = 1.5, desc = 'Time that pod is over propulsive section', units = 's')
self.add_param('prop_period', val = 25.0e3, desc = 'distance between propulsive sections', units = 'm')
self.add_param('num_thrust', val = 10.0, desc = 'Number of booster sections along track', units = 'unitless')
self.add_output('num_pods', val = 0.0, desc = 'Number of Pods', units = 'unitless')
self.add_output('ticket_cost', val = 0.0, desc = 'Ticket cost', units = 'USD')
self.add_output('prop_energy_cost', val = 0.0, desc = 'Cost of propulsion energy', units = 'USD')
self.add_output('tube_energy_cost', val = 0.0, desc = 'Cost of tube energy', units = 'USD')
self.add_output('total_energy_cost', val = 0.0, desc = 'Cost of energy consumpition per year', units = 'USD')
def solve_nonlinear(self, p, u,r):
land_cost = p['land_cost']
water_cost = p['water_cost']
pod_cost= p['pod_cost']
capital_cost = p['capital_cost']
energy_cost = p['energy_cost']
ib = p['ib']
bm = p['bm']
operating_time = p['operating_time']
JtokWh = p['JtokWh']
m_pod = p['m_pod']
n_passengers = p['n_passengers']
pod_period = p['pod_period']
avg_speed = p['avg_speed']
track_length = p['track_length']
land_length = p['land_length']
water_length = p['water_length']
pod_power = -1.0*p['pod_power']
prop_power = p['prop_power']
vac_power = p['vac_power']
steady_vac_power = -1.0*p['steady_vac_power']
vf = p['vf']
g = p['g']
Cd = p['Cd']
S = p['S']
p_tunnel = p['p_tunnel']
T_tunnel = p['T_tunnel']
R = p['R']
eta = p['eta']
D_mag = p['D_mag']
thrust_time = p['thrust_time']
prop_period = p['prop_period']
num_thrust = p['num_thrust']
length_cost = ((water_length/track_length)*water_cost) + ((land_length/track_length)*land_cost)
pod_frequency = 1.0/pod_period
num_pods = np.ceil((track_length/avg_speed)*pod_frequency)
flights_per_pod = (operating_time*pod_frequency)/num_pods
energy_per_flight = pod_power*(track_length/avg_speed)*.9
pod_energy = energy_per_flight*flights_per_pod*num_pods*JtokWh
vac_energy = steady_vac_power*operating_time*JtokWh
rho = p_tunnel/(R*T_tunnel)
start_distance = (vf**2)/(2*g)
start_energy = ((m_pod*g+D_mag)*start_distance + (.5*Cd*rho*g*S*(start_distance**2)))/eta
prop_energy = (num_thrust*thrust_time*prop_power + start_energy)*flights_per_pod*num_pods*JtokWh
tube_energy = prop_energy + vac_energy
u['num_pods'] = num_pods
u['prop_energy_cost'] = prop_energy*energy_cost*365
u['tube_energy_cost'] = tube_energy*energy_cost*365
u['total_energy_cost'] = (pod_energy+tube_energy)*energy_cost*365
u['ticket_cost'] = cost_ticket = (length_cost*(track_length/1000.0) + pod_cost*num_pods + capital_cost*(1.0+ib) + \
energy_cost*(tube_energy + pod_energy)*365.0)/(n_passengers*pod_frequency*bm*365.0*24.0*3600.0)
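# Rough reading of the ticket-cost expression above: the numerator bundles the
# track material cost (length_cost is per km, hence track_length/1000.0), the
# cost of the pod fleet, the capital cost plus one round of bond interest, and
# a year's worth of tube and pod energy; the denominator is the number of
# passengers carried over the bond maturity (passengers per departure *
# departures per second * seconds in bm years).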
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (('n_passengers', 28.0),
('track_length', 600.0e3, {'units' : 'm'}))
root.add('p', TicketCost())
root.add('des_vars', IndepVarComp(params), promotes = ['n_passengers'])
root.connect('n_passengers', 'p.n_passengers')
root.connect('des_vars.track_length', 'p.track_length')
top.setup()
top.run()
print(top['p.ticket_cost'])
# n_passengers = np.linspace(10,100,num = 90)
# ticket_cost = np.zeros((1, len(n_passengers)))
# for i in range(len(n_passengers)):
# top['n_passengers'] = n_passengers[i]
# top.run()
# ticket_cost[0, i] = top['p.ticket_cost']
# plt.plot(n_passengers*(175200.0/(1.0e6)), ticket_cost[0,:])
# plt.show()
| apache-2.0 |
jdmcbr/geopandas | benchmarks/sindex.py | 2 | 3488 | from shapely.geometry import Point
from geopandas import read_file, datasets, GeoSeries
# Derive list of valid query predicates based on underlying index backend;
# we have to create a non-empty instance of the index to get these
index = GeoSeries([Point(0, 0)]).sindex
predicates = sorted(p for p in index.valid_query_predicates if p is not None)
geom_types = ("mixed", "points", "polygons")
def generate_test_df():
world = read_file(datasets.get_path("naturalearth_lowres"))
capitals = read_file(datasets.get_path("naturalearth_cities"))
countries = world.to_crs("epsg:3395")[["geometry"]]
capitals = capitals.to_crs("epsg:3395")[["geometry"]]
mixed = capitals.append(countries) # get a mix of geometries
points = capitals
polygons = countries
# filter out invalid geometries
data = {
"mixed": mixed[mixed.is_valid],
"points": points[points.is_valid],
"polygons": polygons[polygons.is_valid],
}
# ensure index is pre-generated
for data_type in data.keys():
data[data_type].sindex.query(data[data_type].geometry.values.data[0])
return data
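# The classes below follow the airspeed velocity (asv) benchmark convention:
# `params`/`param_names` define the parameter grid, `setup` prepares the data
# outside the timed region, and each `time_*` method is the code path that
# actually gets timed.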
class BenchIntersection:
param_names = ["input_geom_type", "tree_geom_type"]
params = [
geom_types,
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
# cache bounds so that bound creation is not counted in benchmarks
self.bounds = {
data_type: [g.bounds for g in self.data[data_type].geometry]
for data_type in self.data.keys()
}
def time_intersects(self, input_geom_type, tree_geom_type):
tree = self.data[tree_geom_type].sindex
for bounds in self.bounds[input_geom_type]:
tree.intersection(bounds)
class BenchIndexCreation:
param_names = ["tree_geom_type"]
params = [
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
def time_index_creation(self, tree_geom_type):
"""Time creation of spatial index.
Note: requires running a single query to ensure that
lazy-building indexes are actually built.
"""
# Note: the GeoDataFrame._sindex_generated attribute will
# be removed by GH#1444 but is kept here (in the benchmarks)
# so that we can compare pre-GH#1444 to post-GH#1444 if needed
self.data[tree_geom_type]._sindex_generated = None
self.data[tree_geom_type].geometry.values._sindex = None
tree = self.data[tree_geom_type].sindex
# also do a single query to ensure the index is actually
# generated and used
tree.query(
self.data[tree_geom_type].geometry.values.data[0]
)
class BenchQuery:
param_names = ["predicate", "input_geom_type", "tree_geom_type"]
params = [
predicates,
geom_types,
geom_types,
]
def setup(self, *args):
self.data = generate_test_df()
def time_query_bulk(self, predicate, input_geom_type, tree_geom_type):
self.data[tree_geom_type].sindex.query_bulk(
self.data[input_geom_type].geometry.values.data,
predicate=predicate,
)
def time_query(self, predicate, input_geom_type, tree_geom_type):
tree = self.data[tree_geom_type].sindex
for geom in self.data[input_geom_type].geometry.values.data:
tree.query(
geom,
predicate=predicate
)
| bsd-3-clause |
abhisg/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
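# The SVD of X was computed once above; each pass of this loop only reuses
# U, Vh and the squared singular values (eigen_vals_) to recompute coef_,
# so the per-iteration cost stays low.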
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
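# gamma_ below is the effective number of well-determined parameters;
# lambda_ and alpha_ are then re-estimated from gamma_, the squared norm of
# coef_ and the residual sum of squares (rmse_), i.e. the usual
# evidence-maximization updates with the Gamma hyper-prior terms
# (lambda_1/lambda_2, alpha_1/alpha_2) folded in.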
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
clarkfitzg/xray | setup.py | 2 | 4581 | #!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup, find_packages
MAJOR = 0
MINOR = 5
MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
DISTNAME = 'xray'
LICENSE = 'Apache'
AUTHOR = 'xray Developers'
AUTHOR_EMAIL = 'xray-dev@googlegroups.com'
URL = 'https://github.com/xray/xray'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.15.0']
TESTS_REQUIRE = ['nose >= 1.0']
if sys.version_info[:2] < (2, 7):
TESTS_REQUIRE += ["unittest2 == 0.5.1"]
DESCRIPTION = "N-D labeled arrays and datasets in Python"
LONG_DESCRIPTION = """
**xray** is an open source project and Python package that aims to bring the
labeled data power of pandas_ to the physical sciences, by providing
N-dimensional variants of the core pandas data structures.
Our goal is to provide a pandas-like and pandas-compatible toolkit for
analytics on multi-dimensional arrays, rather than the tabular data for which
pandas excels. Our approach adopts the `Common Data Model`_ for self-
describing scientific data in widespread use in the Earth sciences:
``xray.Dataset`` is an in-memory representation of a netCDF file.
.. _pandas: http://pandas.pydata.org
.. _Common Data Model: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM
.. _netCDF: http://www.unidata.ucar.edu/software/netcdf
.. _OPeNDAP: http://www.opendap.org/
Important links
---------------
- HTML documentation: http://xray.readthedocs.org
- Issue tracker: http://github.com/xray/xray/issues
- Source code: http://github.com/xray/xray
- PyData talk: https://www.youtube.com/watch?v=T5CZyNwBa9c
"""
# code to extract and write the version copied from pandas
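# Sketch of the possible outcomes of the logic below: a released build uses
# VERSION + QUALIFIER as-is; without git the version stays '<VERSION>.dev'
# (or an existing xray/version.py is reused); a full clone takes the
# `git describe` output with the leading 'v' stripped; a shallow clone falls
# back to a constructed string like '<VERSION>.dev-<sha>'.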
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git', 'git.cmd']:
try:
pipe = subprocess.Popen(
[cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so, serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('xray/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing xray/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}", rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev = "v%s.dev-%s" % (VERSION, rev)
# Strip leading v from tags format "vx.y.z" to get the version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'xray', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
if write_version:
write_version_py()
setup(name=DISTNAME,
version=FULLVERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
test_suite='nose.collector',
packages=find_packages(),
package_data={'xray': ['test/data/*']})
| apache-2.0 |
shangwuhencc/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
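# The pipeline first keeps only the top `percentile` of features ranked by the
# ANOVA F-test (SelectPercentile(f_classif)) and then fits the SVC on that
# reduced feature set; the loop below sweeps the percentile and records the
# cross-validated score at each setting.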
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/compat/subprocess.py | 19 | 2827 | """
A replacement wrapper around the subprocess module, with a number of
work-arounds:
- Provides the check_output function (which subprocess only provides from Python
2.7 onwards).
- Provides a stub implementation of subprocess members on Google App Engine
(which are missing in subprocess).
Instead of importing subprocess, other modules should use this as follows:
from matplotlib.compat import subprocess
This module is safe to import from anywhere within matplotlib.
"""
from __future__ import absolute_import # Required to import subprocess
from __future__ import print_function
import subprocess
__all__ = ['Popen', 'PIPE', 'STDOUT', 'check_output', 'CalledProcessError']
if hasattr(subprocess, 'Popen'):
Popen = subprocess.Popen
# Assume that it also has the other constants.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
else:
# In restricted environments (such as Google App Engine), these are
# non-existent. Replace them with dummy versions that always raise OSError.
def Popen(*args, **kwargs):
raise OSError("subprocess.Popen is not supported")
PIPE = -1
STDOUT = -2
# There is no need to catch CalledProcessError. These stubs cannot raise
# it. None in an except clause will simply not match any exceptions.
CalledProcessError = None
def _check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte
string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the
returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example::
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.::
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# python2.7's subprocess provides a check_output method
if hasattr(subprocess, 'check_output'):
check_output = subprocess.check_output
else:
check_output = _check_output
| mit |
ycaihua/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 28 | 11950 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
"""Verify the shapes of the imputed matrix for different strategies."""
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
"""Test imputation using the mean and median strategies, when
missing_values == 0."""
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
"""Test imputation using the mean and median strategies, when
missing_values != 0."""
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
"""Test median imputation with sparse boundary cases
"""
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
"""Test imputation using the most-frequent strategy."""
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
"""Test imputation within a pipeline + gridsearch."""
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
"""Test for pickling imputers."""
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
"""Test imputation with copy"""
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
SamStudio8/scikit-bio | skbio/sequence/tests/test_sequence.py | 2 | 106092 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from six.moves import zip_longest
import copy
import re
from types import GeneratorType
from collections import Counter, defaultdict, Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
from skbio.util import assert_data_frame_almost_equal
from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
_as_slice_if_single_index)
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.lowercase_seq = Sequence('AAAAaaaa', lowercase='key')
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
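# The four entries above produce the kinds of input Sequence accepts: a plain
# string, another Sequence object, a '|S1' character ndarray and a uint8 byte
# ndarray, so the same test data can be built through each constructor path.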
def empty_generator():
raise StopIteration()
yield
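# A collection of "empty" index objects (list, tuple, dict, exhausted
# generator, and empty float/int ndarrays); the intent appears to be that
# indexing a Sequence with any of these yields an empty sequence.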
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(11)))
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(11)})
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
def test_init_handles_missing_metadata_efficiently(self):
seq = Sequence('ACGT')
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
# initializing from an existing Sequence object should handle metadata
# attributes efficiently on both objects
new_seq = Sequence(seq)
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(new_seq._metadata)
self.assertIsNone(new_seq._positional_metadata)
self.assertFalse(seq.has_metadata())
self.assertFalse(seq.has_positional_metadata())
self.assertFalse(new_seq.has_metadata())
self.assertFalse(new_seq.has_positional_metadata())
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0, ))
npt.assert_equal(seq.values, np.array('', dtype='c'))
self.assertEqual(str(seq), '')
self.assertEqual(len(seq), 0)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
self.assertEqual(str(seq), 'A')
self.assertEqual(len(seq), 1)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(1)))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
self.assertEqual(str(seq), '.ABC\t123 xyz-')
self.assertEqual(len(seq), 14)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(14)))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}),
Sequence('ACGT', metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}))
# subclasses work too
seq = SequenceSubclass('ACGT',
metadata={'id': 'foo',
'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(
Sequence(seq),
Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)}))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_metadata(self):
for empty in None, {}:
seq = Sequence('', metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
def test_init_empty_metadata_key(self):
seq = Sequence('', metadata={'': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'': ''})
def test_init_empty_metadata_item(self):
seq = Sequence('', metadata={'foo': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': ''})
def test_init_single_character_metadata_item(self):
seq = Sequence('', metadata={'foo': 'z'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': 'z'})
def test_init_multiple_character_metadata_item(self):
seq = Sequence('', metadata={'foo': '\nabc\tdef G123'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': '\nabc\tdef G123'})
def test_init_metadata_multiple_keys(self):
seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata,
{'foo': 'abc', 42: {'nested': 'metadata'}})
def test_init_empty_positional_metadata(self):
# empty seq with missing/empty positional metadata
for empty in None, {}, pd.DataFrame():
seq = Sequence('', positional_metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# non-empty seq with missing positional metadata
seq = Sequence('xyz', positional_metadata=None)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_init_empty_positional_metadata_item(self):
for item in ([], (), np.array([])):
seq = Sequence('', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(0)))
def test_init_single_positional_metadata_item(self):
for item in ([2], (2, ), np.array([2])):
seq = Sequence('G', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(1)))
def test_init_multiple_positional_metadata_item(self):
for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(9)))
def test_init_positional_metadata_multiple_columns(self):
seq = Sequence('^' * 5,
positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_positional_metadata_with_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
seq = Sequence('^' * 5, positional_metadata=df)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with six.assertRaisesRegex(self, TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with six.assertRaisesRegex(self, TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with six.assertRaisesRegex(self, TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with six.assertRaisesRegex(self, TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with six.assertRaisesRegex(self, TypeError, 'int'):
Sequence(42)
with six.assertRaisesRegex(self, TypeError, 'float'):
Sequence(4.2)
with six.assertRaisesRegex(self, TypeError, 'int64'):
Sequence(np.int_(50))
with six.assertRaisesRegex(self, TypeError, 'float64'):
Sequence(np.float_(50))
with six.assertRaisesRegex(self, TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_metadata(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with six.assertRaisesRegex(self, TypeError,
'metadata must be a dict'):
Sequence('abc', metadata=md)
def test_init_invalid_positional_metadata(self):
# not consumable by Pandas
with six.assertRaisesRegex(self, TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
Sequence('ACGT', positional_metadata=2)
# 0 elements
with six.assertRaisesRegex(self, ValueError, '\(0\).*\(4\)'):
Sequence('ACGT', positional_metadata=[])
# not enough elements
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4])
# too many elements
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(3)))
# Series too many rows
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows
with six.assertRaisesRegex(self, ValueError, '\(3\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows
with six.assertRaisesRegex(self, ValueError, '\(5\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(5)}))
def test_values_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
# sequence lengths or input types, odd characters, etc.) are performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_metadata_property_getter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertIsInstance(seq.metadata, dict)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# update existing key
seq.metadata['foo'] = 'baz'
self.assertEqual(seq.metadata, {'foo': 'baz'})
# add new key
seq.metadata['foo2'] = 'bar2'
self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
def test_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
self.assertEqual(seq.metadata, {})
self.assertIsNotNone(seq._metadata)
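    # Added note on the lazy-creation pattern asserted above: ``_metadata``
    # stays ``None`` until the ``metadata`` property is first read, at which
    # point an empty dict is created on demand. A rough sketch of the idea
    # (not the actual implementation):
    #
    #   @property
    #   def metadata(self):
    #       if self._metadata is None:
    #           self._metadata = {}   # created only on first access
    #       return self._metadata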
def test_metadata_property_setter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
new_md = {'bar': 'baz', 42: 42}
seq.metadata = new_md
self.assertEqual(seq.metadata, new_md)
self.assertIsNot(seq.metadata, new_md)
seq.metadata = {}
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_metadata())
def test_metadata_property_setter_invalid_type(self):
seq = Sequence('abc', metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with six.assertRaisesRegex(self, TypeError,
'metadata must be a dict'):
seq.metadata = md
# object should still be usable and its original metadata shouldn't
# have changed
self.assertEqual(seq.metadata, {123: 456})
def test_metadata_property_deleter(self):
md = {'foo': 'bar'}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting again
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting missing metadata immediately after instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
del seq.metadata
self.assertIsNone(seq._metadata)
def test_metadata_property_shallow_copy(self):
md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# updates to keys
seq.metadata['key1'] = 'new val'
self.assertEqual(seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
# original metadata untouched
self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})
# updates to mutable value (by reference)
seq.metadata['key3'].append(3)
self.assertEqual(
seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
# original metadata changed because we didn't deep copy
self.assertEqual(
md,
{'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
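    # Added illustration: the metadata dict handed back above is a new dict
    # (note the assertIsNot checks), but the copy is shallow, so mutable
    # values are still shared. The same effect with a plain dict:
    #
    #   md = {'key3': [1, 2]}
    #   cp = dict(md)          # shallow copy: new dict, same value objects
    #   cp['key3'].append(3)
    #   md['key3']             # -> [1, 2, 3]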
def test_positional_metadata_property_getter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
# update existing column
seq.positional_metadata['foo'] = [42, 42, 43]
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43]}))
# add new column
seq.positional_metadata['foo2'] = [True, False, True]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 43],
'foo2': [True, False, True]}))
def test_positional_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame(index=np.arange(4)))
self.assertIsNotNone(seq._positional_metadata)
def test_positional_metadata_property_setter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
new_md = pd.DataFrame({'bar': np.arange(3)}, index=['a', 'b', 'c'])
seq.positional_metadata = new_md
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': np.arange(3)}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, new_md)
seq.positional_metadata = pd.DataFrame(index=np.arange(3))
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
self.assertFalse(seq.has_positional_metadata())
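    # Added note: as the assertions above show, assigning positional metadata
    # discards the incoming index (['a', 'b', 'c'] here) and re-labels rows
    # positionally as 0..len(sequence)-1, so positional metadata always lines
    # up with sequence positions rather than arbitrary labels.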
def test_positional_metadata_property_setter_invalid_type(self):
# More extensive tests for invalid input are on Sequence.__init__ tests
seq = Sequence('abc', positional_metadata={'foo': [1, 2, 42]})
# not consumable by Pandas
with six.assertRaisesRegex(self, TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
seq.positional_metadata = 2
# object should still be usable and its original metadata shouldn't
# have changed
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# wrong length
        with six.assertRaisesRegex(self, ValueError, r'\(2\).*\(3\)'):
seq.positional_metadata = {'foo': [1, 2]}
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
# None isn't valid when using setter (differs from constructor)
        with six.assertRaisesRegex(self, ValueError, r'\(0\).*\(3\)'):
seq.positional_metadata = None
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [1, 2, 42]}))
def test_positional_metadata_property_deleter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}))
self.assertIsNot(seq.positional_metadata, md)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting again
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
# test deleting missing positional metadata immediately after
# instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._positional_metadata)
del seq.positional_metadata
self.assertIsNone(seq._positional_metadata)
def test_positional_metadata_property_shallow_copy(self):
# define metadata as a DataFrame because this has the potential to have
# its underlying data shared
md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [22, 22, 0]}, index=np.arange(3)))
self.assertIsNot(seq.positional_metadata, md)
# original metadata untouched
orig_md = pd.DataFrame({'foo': [22, 22, 0]}, index=['a', 'b', 'c'])
assert_data_frame_almost_equal(md, orig_md)
# change values of column (using same dtype)
seq.positional_metadata['foo'] = [42, 42, 42]
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [42, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# change single value of underlying data
seq.positional_metadata.values[0][0] = 10
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': [10, 42, 42]}, index=np.arange(3)))
# original metadata untouched
assert_data_frame_almost_equal(md, orig_md)
# create column of object dtype -- these aren't deep copied
md = pd.DataFrame({'obj': [[], [], []]}, index=['a', 'b', 'c'])
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[], [], []]}, index=np.arange(3)))
# mutate list
seq.positional_metadata['obj'][0].append(42)
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'obj': [[42], [], []]}, index=np.arange(3)))
# original metadata changed because we didn't do a full deep copy
assert_data_frame_almost_equal(
md,
pd.DataFrame({'obj': [[42], [], []]}, index=['a', 'b', 'c']))
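    # Added illustration: pandas copies a DataFrame's numeric data, but
    # object-dtype cells only have their references copied, which is what the
    # test above relies on. A standalone sketch of the same behavior:
    #
    #   df = pd.DataFrame({'obj': [[], []]})
    #   dup = df.copy()          # copies the frame, not the nested lists
    #   dup['obj'][0].append(1)
    #   df['obj'][0]             # -> [1]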
def test_positional_metadata_property_set_column_series(self):
seq_text = 'ACGTACGT'
        length = len(seq_text)
        seq = Sequence(seq_text, positional_metadata={'foo': range(length)})
        seq.positional_metadata['bar'] = pd.Series(range(length-3))
        # pandas.Series will be padded with NaN if too short
        npt.assert_equal(seq.positional_metadata['bar'],
                         np.array(list(range(length-3)) + [np.nan]*3))
        seq.positional_metadata['baz'] = pd.Series(range(length+3))
        # pandas.Series will be truncated if too long
        npt.assert_equal(seq.positional_metadata['baz'],
                         np.array(range(length)))
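    # Added illustration: the padding/truncation checked above comes from
    # pandas' index alignment when a Series is assigned as a DataFrame
    # column. A standalone sketch with a fresh 4-row frame:
    #
    #   df = pd.DataFrame(index=range(4))
    #   df['x'] = pd.Series(range(2))  # -> [0.0, 1.0, NaN, NaN] (padded)
    #   df['y'] = pd.Series(range(6))  # -> [0, 1, 2, 3] (extras dropped)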
def test_positional_metadata_property_set_column_array(self):
seq_text = 'ACGTACGT'
        length = len(seq_text)
        seq = Sequence(seq_text, positional_metadata={'foo': range(length)})
        # array-like objects will fail if wrong size
        for array_like in (np.array(range(length-1)), range(length-1),
                           np.array(range(length+1)), range(length+1)):
with six.assertRaisesRegex(self, ValueError,
"Length of values does not match "
"length of index"):
seq.positional_metadata['bar'] = array_like
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) ==
Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a",
metadata={'id': 'b', 'description': 'c'}) ==
Sequence("a",
metadata={'id': 'b', 'description': 'c'}))
self.assertTrue(Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}) ==
Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a", metadata={'id': 'c'}) !=
Sequence("a",
metadata={'id': 'c', 'description': 't'}))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a"))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a", positional_metadata={'quality': [2]}))
self.assertTrue(Sequence("c", positional_metadata={'quality': [3]}) !=
Sequence("b", positional_metadata={'quality': [3]}))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) !=
Sequence("c", metadata={'id': 'b'}))
def test_eq_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('') == Sequence(''))
self.assertTrue(Sequence('z') == Sequence('z'))
self.assertTrue(
Sequence('ACGT') == Sequence('ACGT'))
def test_eq_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
self.assertTrue(seq1 == seq2)
# order shouldn't matter
self.assertTrue(seq2 == seq1)
def test_eq_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': (1, 2, 3, 4)})
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': np.array([1, 2, 3,
4])})
self.assertTrue(seq1 == seq2)
def test_eq_type_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT', metadata={'id': 'bar'})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_positional_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 5]})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1 == seq2)
def test_eq_handles_missing_metadata_efficiently(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('ACGT')
self.assertTrue(seq1 == seq2)
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq1._metadata)
self.assertIsNone(seq1._positional_metadata)
self.assertIsNone(seq2._metadata)
self.assertIsNone(seq2._positional_metadata)
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_has_positional_metadata(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("S", {'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.array([0])})
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality':
np.array([len(seq) - 1])})
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': [10]})
self.assertEqual(seq[10], eseq)
def test_single_index_to_slice(self):
a = [1, 2, 3, 4]
self.assertEqual(slice(0, 1), _single_index_to_slice(0))
self.assertEqual([1], a[_single_index_to_slice(0)])
self.assertEqual(slice(-1, None),
_single_index_to_slice(-1))
self.assertEqual([4], a[_single_index_to_slice(-1)])
def test_is_single_index(self):
self.assertTrue(_is_single_index(0))
self.assertFalse(_is_single_index(True))
self.assertFalse(_is_single_index(bool()))
self.assertFalse(_is_single_index('a'))
def test_as_slice_if_single_index(self):
self.assertEqual(slice(0, 1), _as_slice_if_single_index(0))
slice_obj = slice(2, 3)
self.assertIs(slice_obj,
_as_slice_if_single_index(slice_obj))
def test_slice_positional_metadata(self):
seq = Sequence('ABCDEFGHIJ',
positional_metadata={'foo': np.arange(10),
'bar': np.arange(100, 110)})
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(0)))
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(slice(0, 1))))
self.assertTrue(pd.DataFrame({'foo': [0, 1],
'bar': [100, 101]}).equals(
seq._slice_positional_metadata(slice(0, 2))))
self.assertTrue(pd.DataFrame(
{'foo': [9], 'bar': [109]}, index=[9]).equals(
seq._slice_positional_metadata(9)))
def test_getitem_with_int_no_positional_metadata(self):
seq = Sequence("Sequence string !1@2#3?.,",
metadata={'id': 'id2', 'description': 'no_qual'})
eseq = Sequence("t", metadata={'id': 'id2', 'description': 'no_qual'})
self.assertEqual(seq[10], eseq)
def test_getitem_with_slice_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("012", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': np.arange(3)})
self.assertEqual(seq[0:3], eseq)
self.assertEqual(seq[:3], eseq)
self.assertEqual(seq[:3:1], eseq)
eseq = Sequence("def", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [13, 14, 15]})
self.assertEqual(seq[-3:], eseq)
self.assertEqual(seq[-3::1], eseq)
eseq = Sequence("02468ace",
metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [0, 2, 4, 6, 8, 10,
12, 14]})
self.assertEqual(seq[0:length:2], eseq)
self.assertEqual(seq[::2], eseq)
eseq = Sequence(s[::-1], metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality':
np.arange(length)[::-1]})
self.assertEqual(seq[length::-1], eseq)
self.assertEqual(seq[::-1], eseq)
eseq = Sequence('fdb97531',
metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [15, 13, 11, 9, 7, 5,
3, 1]})
self.assertEqual(seq[length::-2], eseq)
self.assertEqual(seq[::-2], eseq)
self.assertEqual(seq[0:500:], seq)
eseq = Sequence('', metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality':
np.array([], dtype=np.int64)})
self.assertEqual(seq[length:0], eseq)
self.assertEqual(seq[-length:0], eseq)
self.assertEqual(seq[1:0], eseq)
eseq = Sequence("0", metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': [0]})
self.assertEqual(seq[0:1], eseq)
self.assertEqual(seq[0:1:1], eseq)
self.assertEqual(seq[-length::-1], eseq)
def test_getitem_with_slice_no_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id4', 'description': 'no_qual4'})
eseq = Sequence("02468ace",
metadata={'id': 'id4', 'description': 'no_qual4'})
self.assertEqual(seq[0:length:2], eseq)
self.assertEqual(seq[::2], eseq)
def test_getitem_with_tuple_of_mixed_with_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("00000", metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': [0, 0, 0, 0, 0]})
self.assertEqual(seq[0, 0, 0, 0, 0], eseq)
self.assertEqual(seq[0, 0:1, 0, 0, 0], eseq)
self.assertEqual(seq[0, 0:1, 0, -length::-1, 0, 1:0], eseq)
self.assertEqual(seq[0:1, 0:1, 0:1, 0:1, 0:1], eseq)
self.assertEqual(seq[0:1, 0, 0, 0, 0], eseq)
eseq = Sequence("0123fed9",
metadata={'id': 'id5', 'description': 'dsc5'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9, 1:0], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_tuple_of_mixed_no_positional_metadata(self):
seq = Sequence("0123456789abcdef",
metadata={'id': 'id6', 'description': 'no_qual6'})
eseq = Sequence("0123fed9",
metadata={'id': 'id6', 'description': 'no_qual6'})
self.assertEqual(seq[0, 1, 2, 3, 15, 14, 13, 9], eseq)
self.assertEqual(seq[0, 1, 2, 3, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9], eseq)
self.assertEqual(seq[0:4, :-4:-1, 9:10], eseq)
def test_getitem_with_iterable_of_mixed_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'},
positional_metadata={'quality': np.arange(length)})
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield -1
yield slice(-2, -4, -1)
yield 9
eseq = Sequence("0123fed9",
metadata={'id': 'id7', 'description': 'dsc7'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEqual(seq[generator()], eseq)
self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEqual(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_iterable_of_mixed_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id7', 'description': 'dsc7'})
def generator():
yield slice(0, 4)
yield slice(200, 400)
yield slice(None, -4, -1)
yield 9
eseq = Sequence("0123fed9",
metadata={'id': 'id7', 'description': 'dsc7'})
self.assertEqual(seq[[0, 1, 2, 3, 15, 14, 13, 9]], eseq)
self.assertEqual(seq[generator()], eseq)
self.assertEqual(seq[[slice(0, 4), slice(None, -4, -1), 9]], eseq)
self.assertEqual(seq[
[slice(0, 4), slice(None, -4, -1), slice(9, 10)]], eseq)
def test_getitem_with_numpy_index_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id9', 'description': 'dsc9'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("0123fed9",
metadata={'id': 'id9', 'description': 'dsc9'},
positional_metadata={'quality': [0, 1, 2, 3, 15, 14,
13, 9]})
self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_numpy_index_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence("0123fed9",
metadata={'id': 'id10', 'description': 'dsc10'})
self.assertEqual(seq[np.array([0, 1, 2, 3, 15, 14, 13, 9])], eseq)
def test_getitem_with_empty_indices_empty_seq_no_pos_metadata(self):
s = ""
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_empty_indices_non_empty_seq_no_pos_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id10', 'description': 'dsc10'})
eseq = Sequence('', metadata={'id': 'id10', 'description': 'dsc10'})
tested = 0
for index in self.getitem_empty_indices:
tested += 1
self.assertEqual(seq[index], eseq)
self.assertEqual(tested, 6)
def test_getitem_with_boolean_vector_has_qual(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("13579bdf",
metadata={'id': 'id11', 'description': 'dsc11'},
positional_metadata={'quality': [1, 3, 5, 7, 9, 11,
13, 15]})
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
self.assertEqual(seq[[False, True] * 8], eseq)
def test_getitem_with_boolean_vector_no_positional_metadata(self):
s = "0123456789abcdef"
seq = Sequence(s, metadata={'id': 'id11', 'description': 'dsc11'})
eseq = Sequence("13579bdf",
metadata={'id': 'id11', 'description': 'dsc11'})
self.assertEqual(seq[np.array([False, True] * 8)], eseq)
def test_getitem_with_invalid(self):
seq = Sequence("123456",
metadata={'id': 'idm', 'description': 'description'},
positional_metadata={'quality': [1, 2, 3, 4, 5, 6]})
with self.assertRaises(IndexError):
seq['not an index']
with self.assertRaises(IndexError):
seq[['1', '2']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), 'a']]
with self.assertRaises(IndexError):
seq[[1, slice(1, 2), True]]
with self.assertRaises(IndexError):
seq[True]
with self.assertRaises(IndexError):
seq[np.array([True, False])]
with self.assertRaises(IndexError):
seq[999]
with self.assertRaises(IndexError):
seq[0, 0, 999]
# numpy 1.8.1 and 1.9.2 raise different error types
# (ValueError, IndexError).
with self.assertRaises(Exception):
seq[100 * [True, False, True]]
def test_getitem_handles_missing_metadata_efficiently(self):
# there are two paths in __getitem__ we need to test for efficient
# handling of missing metadata
# path 1: mixed types
seq = Sequence('ACGT')
subseq = seq[1, 2:4]
self.assertEqual(subseq, Sequence('CGT'))
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(subseq._metadata)
self.assertIsNone(subseq._positional_metadata)
# path 2: uniform types
seq = Sequence('ACGT')
subseq = seq[1:3]
self.assertEqual(subseq, Sequence('CG'))
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(subseq._metadata)
self.assertIsNone(subseq._positional_metadata)
def test_len(self):
self.assertEqual(len(Sequence("")), 0)
self.assertEqual(len(Sequence("a")), 1)
self.assertEqual(len(Sequence("abcdef")), 6)
def test_nonzero(self):
# blank
self.assertFalse(Sequence(""))
self.assertFalse(Sequence("",
metadata={'id': 'foo'},
positional_metadata={'quality': range(0)}))
# single
self.assertTrue(Sequence("A"))
self.assertTrue(Sequence("A",
metadata={'id': 'foo'},
positional_metadata={'quality': range(1)}))
# multi
self.assertTrue(Sequence("ACGT"))
self.assertTrue(Sequence("ACGT",
metadata={'id': 'foo'},
positional_metadata={'quality': range(4)}))
def test_contains(self):
seq = Sequence("#@ACGT,24.13**02")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertTrue(c(',24') in seq)
self.assertTrue(c('*') in seq)
self.assertTrue(c('') in seq)
self.assertFalse(c("$") in seq)
self.assertFalse(c("AGT") in seq)
self.assertEqual(tested, 4)
def test_contains_sequence_subclass(self):
with self.assertRaises(TypeError):
SequenceSubclass("A") in Sequence("AAA")
self.assertTrue(SequenceSubclass("A").values in Sequence("AAA"))
def test_hash(self):
with self.assertRaises(TypeError):
hash(Sequence("ABCDEFG"))
self.assertNotIsInstance(Sequence("ABCDEFG"), Hashable)
def test_iter_has_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': np.arange(10)})
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i),
metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': [i]}))
self.assertTrue(tested)
def test_iter_no_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
for i, s in enumerate(seq):
tested = True
self.assertEqual(s, Sequence(str(i),
metadata={'id': 'a', 'desc': 'b'}))
self.assertTrue(tested)
def test_reversed_has_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual': np.arange(10)})
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i),
metadata={'id': 'a', 'desc': 'b'},
positional_metadata={'qual':
[9 - i]}))
self.assertTrue(tested)
def test_reversed_no_positional_metadata(self):
tested = False
seq = Sequence("0123456789", metadata={'id': 'a', 'desc': 'b'})
for i, s in enumerate(reversed(seq)):
tested = True
self.assertEqual(s, Sequence(str(9 - i),
metadata={'id': 'a', 'desc': 'b'}))
self.assertTrue(tested)
def test_repr(self):
# basic sanity checks -- more extensive testing of formatting and
# special cases is performed in SequenceReprDoctests below. here we
# only test that pieces of the repr are present. these tests also
# exercise coverage for py2/3 since the doctests in
# SequenceReprDoctests only currently run in py2.
# minimal
obs = repr(Sequence(''))
self.assertEqual(obs.count('\n'), 4)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 0', obs)
self.assertTrue(obs.endswith('-'))
# no metadata
obs = repr(Sequence('ACGT'))
self.assertEqual(obs.count('\n'), 5)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 4', obs)
self.assertTrue(obs.endswith('0 ACGT'))
# metadata and positional metadata of mixed types
obs = repr(
Sequence(
'ACGT',
metadata={'foo': 'bar', u'bar': 33.33, None: True, False: {},
(1, 2): 3, 'acb' * 100: "'", 10: 11},
positional_metadata={'foo': range(4),
42: ['a', 'b', [], 'c']}))
self.assertEqual(obs.count('\n'), 16)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('None: True', obs)
self.assertIn('\'foo\': \'bar\'', obs)
self.assertIn('42: <dtype: object>', obs)
self.assertIn('\'foo\': <dtype: int64>', obs)
self.assertIn('length: 4', obs)
self.assertTrue(obs.endswith('0 ACGT'))
# sequence spanning > 5 lines
obs = repr(Sequence('A' * 301))
self.assertEqual(obs.count('\n'), 9)
self.assertTrue(obs.startswith('Sequence'))
self.assertIn('length: 301', obs)
self.assertIn('...', obs)
self.assertTrue(obs.endswith('300 A'))
def test_str(self):
self.assertEqual(str(Sequence("GATTACA")), "GATTACA")
self.assertEqual(str(Sequence("ACCGGTACC")), "ACCGGTACC")
self.assertEqual(str(Sequence("GREG")), "GREG")
self.assertEqual(
str(Sequence("ABC",
positional_metadata={'quality': [1, 2, 3]})),
"ABC")
self.assertIs(type(str(Sequence("A"))), str)
def test_to_default_behavior(self):
# minimal sequence, sequence with all optional attributes present, and
# a subclass of Sequence
for seq in (Sequence('ACGT'),
Sequence('ACGT', metadata={'id': 'foo', 'desc': 'bar'},
positional_metadata={'quality': range(4)}),
SequenceSubclass('ACGU', metadata={'id': 'rna seq'})):
to = seq._to()
self.assertEqual(seq, to)
self.assertIsNot(seq, to)
def test_to_update_single_attribute(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'description': 'gapped hello'},
positional_metadata={'quality': range(11)})
to = seq._to(metadata={'id': 'new id'})
self.assertIsNot(seq, to)
self.assertNotEqual(seq, to)
self.assertEqual(
to,
Sequence('HE..--..LLO', metadata={'id': 'new id'},
positional_metadata={'quality': range(11)}))
# metadata shouldn't have changed on the original sequence
self.assertEqual(seq.metadata,
{'id': 'hello', 'description': 'gapped hello'})
def test_to_update_multiple_attributes(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'description': 'gapped hello'},
positional_metadata={'quality': range(11)})
to = seq._to(metadata={'id': 'new id', 'description': 'new desc'},
positional_metadata={'quality': range(20, 25)},
sequence='ACGTA')
self.assertIsNot(seq, to)
self.assertNotEqual(seq, to)
# attributes should be what we specified in the _to call...
self.assertEqual(to.metadata['id'], 'new id')
npt.assert_array_equal(to.positional_metadata['quality'],
np.array([20, 21, 22, 23, 24]))
npt.assert_array_equal(to.values, np.array('ACGTA', dtype='c'))
self.assertEqual(to.metadata['description'], 'new desc')
# ...and shouldn't have changed on the original sequence
self.assertEqual(seq.metadata['id'], 'hello')
npt.assert_array_equal(seq.positional_metadata['quality'], range(11))
npt.assert_array_equal(seq.values, np.array('HE..--..LLO',
dtype='c'))
self.assertEqual(seq.metadata['description'], 'gapped hello')
def test_to_invalid_kwargs(self):
seq = Sequence('ACCGGTACC', metadata={'id': "test-seq",
'desc': "A test sequence"})
with self.assertRaises(TypeError):
seq._to(metadata={'id': 'bar'}, unrecognized_kwarg='baz')
def test_count(self):
def construct_char_array(s):
return np.fromstring(s, dtype='|S1')
def construct_uint8_array(s):
return np.fromstring(s, dtype=np.uint8)
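        # Added note: np.fromstring on a str is the py2-era construction; for
        # ASCII text, np.frombuffer(s.encode('ascii'), dtype=...) would be
        # the equivalent, non-deprecated spelling in later numpy versions.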
seq = Sequence("1234567899876555")
tested = 0
for c in self.sequence_kinds:
tested += 1
self.assertEqual(seq.count(c('4')), 1)
self.assertEqual(seq.count(c('8')), 2)
self.assertEqual(seq.count(c('5')), 4)
self.assertEqual(seq.count(c('555')), 1)
self.assertEqual(seq.count(c('555'), 0, 4), 0)
self.assertEqual(seq.count(c('555'), start=0, end=4), 0)
self.assertEqual(seq.count(c('5'), start=10), 3)
self.assertEqual(seq.count(c('5'), end=10), 1)
with self.assertRaises(ValueError):
seq.count(c(''))
self.assertEqual(tested, 4)
def test_count_on_subclass(self):
with self.assertRaises(TypeError) as cm:
Sequence("abcd").count(SequenceSubclass("a"))
self.assertIn("Sequence", str(cm.exception))
self.assertIn("SequenceSubclass", str(cm.exception))
def test_lowercase_mungeable_key(self):
# NOTE: This test relies on Sequence._munge_to_index_array working
# properly. If the internal implementation of the lowercase method
# changes to no longer use _munge_to_index_array, this test may need
# to be updated to cover cases currently covered by
# _munge_to_index_array
self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))
def test_lowercase_array_key(self):
# NOTE: This test relies on Sequence._munge_to_index_array working
# properly. If the internal implementation of the lowercase method
# changes to no longer use _munge_to_index_array, this test may need
# to be updated to cover cases currently covered by
# _munge_to_index_array
self.assertEqual('aaAAaaaa',
self.lowercase_seq.lowercase(
np.array([True, True, False, False, True, True,
True, True])))
self.assertEqual('AaAAaAAA',
self.lowercase_seq.lowercase([1, 4]))
def test_distance(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("abcdef")
seq2 = constructor("12bcef")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
self.assertEqual(tested, 4)
def test_distance_arbitrary_function(self):
def metric(x, y):
return len(x) ** 2 + len(y) ** 2
seq1 = Sequence("12345678")
seq2 = Sequence("1234")
result = seq1.distance(seq2, metric=metric)
self.assertIsInstance(result, float)
self.assertEqual(result, 80.0)
def test_distance_default_metric(self):
seq1 = Sequence("abcdef")
seq2 = Sequence("12bcef")
seq_wrong = Sequence("abcdefghijklmnop")
self.assertIsInstance(seq1.distance(seq1), float)
self.assertEqual(seq1.distance(seq1), 0.0)
self.assertEqual(seq1.distance(seq2), 2.0/3.0)
with self.assertRaises(ValueError):
seq1.distance(seq_wrong)
with self.assertRaises(ValueError):
seq_wrong.distance(seq1)
def test_distance_on_subclass(self):
seq1 = Sequence("abcdef")
seq2 = SequenceSubclass("12bcef")
with self.assertRaises(TypeError):
seq1.distance(seq2)
def test_matches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([True, False] * 4)
npt.assert_equal(seq1.matches(seq2), expected)
self.assertEqual(tested, 4)
def test_matches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.matches(seq2)
def test_matches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.matches(seq2)
def test_mismatches(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = Sequence("AACCEEGG")
seq2 = constructor("ABCDEFGH")
expected = np.array([False, True] * 4)
npt.assert_equal(seq1.mismatches(seq2), expected)
self.assertEqual(tested, 4)
def test_mismatches_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatches(seq2)
def test_mismatches_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatches(seq2)
def test_mismatch_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1)), int)
self.assertEqual(seq1.mismatch_frequency(seq1), 0)
self.assertEqual(seq1.mismatch_frequency(seq2), 4)
self.assertEqual(seq1.mismatch_frequency(seq3), 8)
def test_mismatch_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.mismatch_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.mismatch_frequency(seq1, relative=True), 0.0)
self.assertEqual(seq1.mismatch_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.mismatch_frequency(seq3, relative=True), 1.0)
def test_mismatch_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.mismatch_frequency(seq2)
    def test_mismatch_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.mismatch_frequency(seq2)
def test_match_frequency(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1)), int)
self.assertEqual(seq1.match_frequency(seq1), 8)
self.assertEqual(seq1.match_frequency(seq2), 4)
self.assertEqual(seq1.match_frequency(seq3), 0)
def test_match_frequency_relative(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("ABCDEFGH")
seq3 = Sequence("TTTTTTTT")
self.assertIs(type(seq1.match_frequency(seq1, relative=True)),
float)
self.assertEqual(seq1.match_frequency(seq1, relative=True), 1.0)
self.assertEqual(seq1.match_frequency(seq2, relative=True), 0.5)
self.assertEqual(seq1.match_frequency(seq3, relative=True), 0.0)
def test_match_frequency_unequal_length(self):
seq1 = Sequence("AACCEEGG")
seq2 = Sequence("TOOLONGTOCOMPARE")
with self.assertRaises(ValueError):
seq1.match_frequency(seq2)
def test_match_frequency_on_subclass(self):
seq1 = Sequence("AACCEEGG")
seq2 = SequenceSubclass("ABCDEFGH")
with self.assertRaises(TypeError):
seq1.match_frequency(seq2)
def test_index(self):
tested = 0
for c in self.sequence_kinds:
tested += 1
seq = Sequence("ABCDEFG@@ABCDFOO")
self.assertEqual(seq.index(c("A")), 0)
self.assertEqual(seq.index(c("@")), 7)
self.assertEqual(seq.index(c("@@")), 7)
with self.assertRaises(ValueError):
seq.index("A", start=1, end=5)
self.assertEqual(tested, 4)
def test_index_on_subclass(self):
with self.assertRaises(TypeError):
Sequence("ABCDEFG").index(SequenceSubclass("A"))
self.assertEqual(
SequenceSubclass("ABCDEFG").index(SequenceSubclass("A")), 0)
def _compare_kmers_results(self, observed, expected):
for obs, exp in zip_longest(observed, expected, fillvalue=None):
self.assertEqual(obs, exp)
def test_iter_kmers(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
expected = [
Sequence('G', positional_metadata={'quality': [0]}),
Sequence('A', positional_metadata={'quality': [1]}),
Sequence('T', positional_metadata={'quality': [2]}),
Sequence('T', positional_metadata={'quality': [3]}),
Sequence('A', positional_metadata={'quality': [4]}),
Sequence('C', positional_metadata={'quality': [5]}),
Sequence('A', positional_metadata={'quality': [6]})
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA', positional_metadata={'quality': [0, 1]}),
Sequence('TT', positional_metadata={'quality': [2, 3]}),
Sequence('AC', positional_metadata={'quality': [4, 5]})
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
Sequence('TAC', positional_metadata={'quality': [3, 4, 5]})
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA',
positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_no_positional_metadata(self):
seq = Sequence('GATTACA')
expected = [
Sequence('G'),
Sequence('A'),
Sequence('T'),
Sequence('T'),
Sequence('A'),
Sequence('C'),
Sequence('A')
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=False), expected)
expected = [
Sequence('GA'),
Sequence('TT'),
Sequence('AC')
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=False), expected)
expected = [
Sequence('GAT'),
Sequence('TAC')
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=False), expected)
expected = [
Sequence('GATTACA')
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=False), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=False), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
expected = [
Sequence('G', positional_metadata={'quality': [0]}),
Sequence('A', positional_metadata={'quality': [1]}),
Sequence('T', positional_metadata={'quality': [2]}),
Sequence('T', positional_metadata={'quality': [3]}),
Sequence('A', positional_metadata={'quality': [4]}),
Sequence('C', positional_metadata={'quality': [5]}),
Sequence('A', positional_metadata={'quality': [6]})
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA', positional_metadata={'quality': [0, 1]}),
Sequence('AT', positional_metadata={'quality': [1, 2]}),
Sequence('TT', positional_metadata={'quality': [2, 3]}),
Sequence('TA', positional_metadata={'quality': [3, 4]}),
Sequence('AC', positional_metadata={'quality': [4, 5]}),
Sequence('CA', positional_metadata={'quality': [5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT', positional_metadata={'quality': [0, 1, 2]}),
Sequence('ATT', positional_metadata={'quality': [1, 2, 3]}),
Sequence('TTA', positional_metadata={'quality': [2, 3, 4]}),
Sequence('TAC', positional_metadata={'quality': [3, 4, 5]}),
Sequence('ACA', positional_metadata={'quality': [4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA',
positional_metadata={'quality': [0, 1, 2, 3, 4, 5, 6]})
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_with_overlap_no_positional_metadata(self):
seq = Sequence('GATTACA')
expected = [
Sequence('G'),
Sequence('A'),
Sequence('T'),
Sequence('T'),
Sequence('A'),
Sequence('C'),
Sequence('A')
]
self._compare_kmers_results(
seq.iter_kmers(1, overlap=True), expected)
expected = [
Sequence('GA'),
Sequence('AT'),
Sequence('TT'),
Sequence('TA'),
Sequence('AC'),
Sequence('CA')
]
self._compare_kmers_results(
seq.iter_kmers(2, overlap=True), expected)
expected = [
Sequence('GAT'),
Sequence('ATT'),
Sequence('TTA'),
Sequence('TAC'),
Sequence('ACA')
]
self._compare_kmers_results(
seq.iter_kmers(3, overlap=True), expected)
expected = [
Sequence('GATTACA')
]
self._compare_kmers_results(
seq.iter_kmers(7, overlap=True), expected)
expected = []
self._compare_kmers_results(
seq.iter_kmers(8, overlap=True), expected)
self.assertIs(type(seq.iter_kmers(1)), GeneratorType)
def test_iter_kmers_invalid_k(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_invalid_k_no_positional_metadata(self):
seq = Sequence('GATTACA')
with self.assertRaises(ValueError):
list(seq.iter_kmers(0))
with self.assertRaises(ValueError):
list(seq.iter_kmers(-42))
def test_iter_kmers_different_sequences(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'desc': 'gapped hello'},
positional_metadata={'quality': range(11)})
expected = [
Sequence('HE.', positional_metadata={'quality': [0, 1, 2]},
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('.--', positional_metadata={'quality': [3, 4, 5]},
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('..L', positional_metadata={'quality': [6, 7, 8]},
metadata={'id': 'hello', 'desc': 'gapped hello'})
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_iter_kmers_different_sequences_no_positional_metadata(self):
seq = Sequence('HE..--..LLO',
metadata={'id': 'hello', 'desc': 'gapped hello'})
expected = [
Sequence('HE.',
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('.--',
metadata={'id': 'hello', 'desc': 'gapped hello'}),
Sequence('..L',
metadata={'id': 'hello', 'desc': 'gapped hello'})
]
self._compare_kmers_results(seq.iter_kmers(3, overlap=False), expected)
def test_kmer_frequencies(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
# overlap = True
expected = Counter('GATTACA')
self.assertEqual(seq.kmer_frequencies(1, overlap=True), expected)
expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
self.assertEqual(seq.kmer_frequencies(3, overlap=True), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=True), expected)
# overlap = False
expected = Counter(['GAT', 'TAC'])
self.assertEqual(seq.kmer_frequencies(3, overlap=False), expected)
expected = Counter(['GATTACA'])
self.assertEqual(seq.kmer_frequencies(7, overlap=False), expected)
expected = Counter([])
self.assertEqual(seq.kmer_frequencies(8, overlap=False), expected)
def test_kmer_frequencies_relative(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
# overlap = True
expected = defaultdict(float)
expected['A'] = 3/7.
expected['C'] = 1/7.
expected['G'] = 1/7.
expected['T'] = 2/7.
self.assertEqual(seq.kmer_frequencies(1, overlap=True, relative=True),
expected)
expected = defaultdict(float)
expected['GAT'] = 1/5.
expected['ATT'] = 1/5.
expected['TTA'] = 1/5.
expected['TAC'] = 1/5.
expected['ACA'] = 1/5.
self.assertEqual(seq.kmer_frequencies(3, overlap=True, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=True, relative=True),
expected)
# overlap = False
expected = defaultdict(float)
expected['GAT'] = 1/2.
expected['TAC'] = 1/2.
self.assertEqual(seq.kmer_frequencies(3, overlap=False, relative=True),
expected)
expected = defaultdict(float)
expected['GATTACA'] = 1.0
self.assertEqual(seq.kmer_frequencies(7, overlap=False, relative=True),
expected)
expected = defaultdict(float)
self.assertEqual(seq.kmer_frequencies(8, overlap=False, relative=True),
expected)
def test_kmer_frequencies_floating_point_precision(self):
# Test that a sequence having no variation in k-words yields a
# frequency of exactly 1.0. Note that it is important to use
# self.assertEqual here instead of self.assertAlmostEqual because we
# want to test for exactly 1.0. A previous implementation of
# Sequence.kmer_frequencies(relative=True) added (1 / num_words) for
# each occurrence of a k-word to compute the frequencies (see
# https://github.com/biocore/scikit-bio/issues/801). In certain cases,
# this yielded a frequency slightly less than 1.0 due to roundoff
# error. The test case here uses a sequence with 10 characters that are
# all identical and computes k-word frequencies with k=1. This test
# case exposes the roundoff error present in the previous
# implementation because there are 10 k-words (which are all
# identical), so 1/10 added 10 times yields a number slightly less than
# 1.0. This occurs because 1/10 cannot be represented exactly as a
# floating point number.
seq = Sequence('AAAAAAAAAA')
self.assertEqual(seq.kmer_frequencies(1, relative=True),
defaultdict(float, {'A': 1.0}))
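    # Added worked illustration of the roundoff described above:
    #
    #   sum([1 / 10.] * 10)   # -> 0.9999999999999999 (repeated addition)
    #   10 / 10.              # -> 1.0 exactly
    #
    # so counting occurrences first and dividing once avoids the drift for
    # this case.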
def test_find_with_regex(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = re.compile('(T+A)(CA)')
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_string_as_input(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = '(T+A)(CA)'
obs = list(seq.find_with_regex(pat))
exp = [slice(2, 5), slice(5, 7)]
self.assertEqual(obs, exp)
self.assertIs(type(seq.find_with_regex(pat)), GeneratorType)
def test_find_with_regex_no_groups(self):
seq = Sequence('GATTACA', positional_metadata={'quality': range(7)})
pat = re.compile('(FOO)')
self.assertEqual(list(seq.find_with_regex(pat)), [])
def test_find_with_regex_ignore_no_difference(self):
seq = Sequence('..ABCDEFG..')
pat = "([A-Z]+)"
exp = [slice(2, 9)]
self.assertEqual(list(seq.find_with_regex(pat)), exp)
obs = seq.find_with_regex(
pat, ignore=np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=bool))
self.assertEqual(list(obs), exp)
def test_find_with_regex_ignore(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1,
1, 0, 0, 1, 1, 0, 1], dtype=bool))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_find_with_regex_ignore_index_array(self):
obs = Sequence('A..A..BBAAB.A..AB..A.').find_with_regex(
"(A+)", ignore=np.array([1, 2, 4, 5, 11, 13, 14, 17, 18, 20]))
self.assertEqual(list(obs), [slice(0, 4), slice(8, 10), slice(12, 16),
slice(19, 20)])
def test_iter_contiguous_index_array(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c([0, 1, 2, 3, 8, 9, 10, 11]))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_boolean_vector(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(([True] * 4 + [False] * 4) * 2))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_iterable_slices(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("0123"), Sequence("89ab")]
obs = s.iter_contiguous(c(spaced_out()))
self.assertEqual(list(obs), exp)
exp = [Sequence("01234567"), Sequence("cdef")]
obs = s.iter_contiguous(c(contiguous()))
self.assertEqual(list(obs), exp)
def test_iter_contiguous_with_max_length(self):
s = Sequence("0123456789abcdef")
for c in list, tuple, np.array, pd.Series:
exp = [Sequence("234"), Sequence("678"), Sequence("abc")]
obs = s.iter_contiguous(c([True, False, True, True] * 4),
min_length=3)
self.assertEqual(list(obs), exp)
exp = [Sequence("0"), Sequence("234"), Sequence("678"),
Sequence("abc"), Sequence("ef")]
obs1 = list(s.iter_contiguous(c([True, False, True, True] * 4),
min_length=1))
obs2 = list(s.iter_contiguous(c([True, False, True, True] * 4)))
self.assertEqual(obs1, obs2)
self.assertEqual(obs1, exp)
def test_iter_contiguous_with_invert(self):
def spaced_out():
yield slice(0, 4)
yield slice(8, 12)
def contiguous():
yield slice(0, 4)
yield slice(4, 8)
yield slice(12, 16)
s = Sequence("0123456789abcdef")
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = [Sequence("4567"), Sequence("cdef")]
obs = s.iter_contiguous(c(spaced_out()), invert=True)
self.assertEqual(list(obs), exp)
exp = [Sequence("89ab")]
obs = s.iter_contiguous(c(contiguous()), invert=True)
self.assertEqual(list(obs), exp)
def test_has_metadata(self):
# truly missing
seq = Sequence('ACGT')
self.assertFalse(seq.has_metadata())
# metadata attribute should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
# looks empty
seq = Sequence('ACGT', metadata={})
self.assertFalse(seq.has_metadata())
# metadata is present
seq = Sequence('ACGT', metadata={'foo': 42})
self.assertTrue(seq.has_metadata())
def test_has_positional_metadata(self):
# truly missing
seq = Sequence('ACGT')
self.assertFalse(seq.has_positional_metadata())
# positional metadata attribute should be None and not initialized to a
# "missing" representation
self.assertIsNone(seq._positional_metadata)
# looks empty
seq = Sequence('ACGT',
positional_metadata=pd.DataFrame(index=np.arange(4)))
self.assertFalse(seq.has_positional_metadata())
# positional metadata is present
seq = Sequence('ACGT', positional_metadata={'foo': [1, 2, 3, 4]})
self.assertTrue(seq.has_positional_metadata())
def test_copy_without_metadata(self):
# shallow vs deep copy with sequence only should be equivalent. thus,
# copy.copy, copy.deepcopy, and Sequence.copy(deep=True|False) should
# all be equivalent
for copy_method in (lambda seq: seq.copy(deep=False),
lambda seq: seq.copy(deep=True),
copy.copy, copy.deepcopy):
seq = Sequence('ACGT')
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
# metadata attributes should be None and not initialized to a
# "missing" representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(seq_copy._metadata)
self.assertIsNone(seq_copy._positional_metadata)
def test_copy_with_metadata_shallow(self):
# copy.copy and Sequence.copy should behave identically
for copy_method in lambda seq: seq.copy(), copy.copy:
seq = Sequence('ACGT', metadata={'foo': [1]},
positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
self.assertIsNot(seq_copy._metadata, seq._metadata)
self.assertIsNot(seq_copy._positional_metadata,
seq._positional_metadata)
self.assertIsNot(seq_copy._positional_metadata.values,
seq._positional_metadata.values)
self.assertIs(seq_copy._metadata['foo'], seq._metadata['foo'])
self.assertIs(seq_copy._positional_metadata.loc[0, 'bar'],
seq._positional_metadata.loc[0, 'bar'])
seq_copy.metadata['foo'].append(2)
seq_copy.metadata['foo2'] = 42
self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(seq.metadata, {'foo': [1, 2]})
seq_copy.positional_metadata.loc[0, 'bar'].append(1)
seq_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
seq_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_copy_with_metadata_deep(self):
# copy.deepcopy and Sequence.copy(deep=True) should behave identically
for copy_method in lambda seq: seq.copy(deep=True), copy.deepcopy:
seq = Sequence('ACGT', metadata={'foo': [1]},
positional_metadata={'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]})
seq_copy = copy_method(seq)
self.assertEqual(seq_copy, seq)
self.assertIsNot(seq_copy, seq)
self.assertIsNot(seq_copy._bytes, seq._bytes)
self.assertIsNot(seq_copy._metadata, seq._metadata)
self.assertIsNot(seq_copy._positional_metadata,
seq._positional_metadata)
self.assertIsNot(seq_copy._positional_metadata.values,
seq._positional_metadata.values)
self.assertIsNot(seq_copy._metadata['foo'], seq._metadata['foo'])
self.assertIsNot(seq_copy._positional_metadata.loc[0, 'bar'],
seq._positional_metadata.loc[0, 'bar'])
seq_copy.metadata['foo'].append(2)
seq_copy.metadata['foo2'] = 42
self.assertEqual(seq_copy.metadata, {'foo': [1, 2], 'foo2': 42})
self.assertEqual(seq.metadata, {'foo': [1]})
seq_copy.positional_metadata.loc[0, 'bar'].append(1)
seq_copy.positional_metadata.loc[0, 'baz'] = 43
assert_data_frame_almost_equal(
seq_copy.positional_metadata,
pd.DataFrame({'bar': [[1], [], [], []],
'baz': [43, 42, 42, 42]}))
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'bar': [[], [], [], []],
'baz': [42, 42, 42, 42]}))
def test_deepcopy_memo_is_respected(self):
# basic test to ensure deepcopy's memo is passed through to recursive
# deepcopy calls
seq = Sequence('ACGT', metadata={'foo': 'bar'})
memo = {}
copy.deepcopy(seq, memo)
self.assertGreater(len(memo), 2)
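    # Added illustration: deepcopy's memo maps id(original) -> copy so each
    # object is copied at most once and shared references (or cycles) remain
    # shared in the result:
    #
    #   shared = ['x']
    #   obj = {'a': shared, 'b': shared}
    #   dup = copy.deepcopy(obj)
    #   dup['a'] is dup['b']   # -> True; the memo reused the first copy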
def test_munge_to_index_array_valid_index_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([1, 2, 3], dtype=int)
obs = s._munge_to_index_array(c([1, 2, 3]))
npt.assert_equal(obs, exp)
exp = np.array([1, 3, 5], dtype=int)
obs = s._munge_to_index_array(c([1, 3, 5]))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_index_array(self):
s = Sequence("12345678")
for c in list, tuple, np.array, pd.Series:
with self.assertRaises(ValueError):
s._munge_to_index_array(c([3, 2, 1]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([5, 6, 7, 2]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([0, 1, 2, 1]))
def test_munge_to_index_array_valid_bool_array(self):
s = Sequence('123456')
for c in list, tuple, np.array, pd.Series:
exp = np.array([2, 3, 5], dtype=int)
obs = s._munge_to_index_array(
c([False, False, True, True, False, True]))
npt.assert_equal(obs, exp)
exp = np.array([], dtype=int)
obs = s._munge_to_index_array(
c([False] * 6))
npt.assert_equal(obs, exp)
exp = np.arange(6)
obs = s._munge_to_index_array(
c([True] * 6))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_bool_array(self):
s = Sequence('123456')
for c in (list, tuple, lambda x: np.array(x, dtype=bool),
lambda x: pd.Series(x, dtype=bool)):
with self.assertRaises(ValueError):
s._munge_to_index_array(c([]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True]))
with self.assertRaises(ValueError):
s._munge_to_index_array(c([True] * 10))
def test_munge_to_index_array_valid_iterable(self):
s = Sequence('')
def slices_only():
return (slice(i, i+1) for i in range(0, 10, 2))
def mixed():
return (slice(i, i+1) if i % 2 == 0 else i for i in range(10))
def unthinkable():
for i in range(10):
if i % 3 == 0:
yield slice(i, i+1)
elif i % 3 == 1:
yield i
else:
yield np.array([i], dtype=int)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(mixed()))
npt.assert_equal(obs, exp)
exp = np.arange(10, dtype=int)
obs = s._munge_to_index_array(c(unthinkable()))
npt.assert_equal(obs, exp)
exp = np.arange(10, step=2, dtype=int)
obs = s._munge_to_index_array(c(slices_only()))
npt.assert_equal(obs, exp)
def test_munge_to_index_array_invalid_iterable(self):
s = Sequence('')
def bad1():
yield "r"
yield [1, 2, 3]
def bad2():
yield 1
yield 'str'
def bad3():
yield False
yield True
yield 2
def bad4():
yield np.array([False, True])
yield slice(2, 5)
for c in (lambda x: x, list, tuple, lambda x: np.array(tuple(x)),
lambda x: pd.Series(tuple(x))):
with self.assertRaises(TypeError):
s._munge_to_index_array(bad1())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad2())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad3())
with self.assertRaises(TypeError):
s._munge_to_index_array(bad4())
def test_munge_to_index_array_valid_string(self):
seq = Sequence('ACGTACGT',
positional_metadata={'introns': [False, True, True,
False, False, True,
False, False]})
npt.assert_equal(np.array([1, 2, 5]),
seq._munge_to_index_array('introns'))
seq.positional_metadata['exons'] = ~seq.positional_metadata['introns']
npt.assert_equal(np.array([0, 3, 4, 6, 7]),
seq._munge_to_index_array('exons'))
def test_munge_to_index_array_invalid_string(self):
seq_str = 'ACGT'
seq = Sequence(seq_str,
positional_metadata={'quality': range(len(seq_str))})
with six.assertRaisesRegex(self, ValueError,
"No positional metadata associated with "
"key 'introns'"):
seq._munge_to_index_array('introns')
with six.assertRaisesRegex(self, TypeError,
"Column 'quality' in positional metadata "
"does not correspond to a boolean "
"vector"):
seq._munge_to_index_array('quality')
def test_munge_to_bytestring_return_bytes(self):
seq = Sequence('')
m = 'dummy_method'
str_inputs = ('', 'a', 'acgt')
unicode_inputs = (u'', u'a', u'acgt')
byte_inputs = (b'', b'a', b'acgt')
seq_inputs = (Sequence(''), Sequence('a'), Sequence('acgt'))
all_inputs = str_inputs + unicode_inputs + byte_inputs + seq_inputs
all_expected = [b'', b'a', b'acgt'] * 4
for input_, expected in zip(all_inputs, all_expected):
observed = seq._munge_to_bytestring(input_, m)
self.assertEqual(observed, expected)
self.assertIs(type(observed), bytes)
def test_munge_to_bytestring_unicode_out_of_ascii_range(self):
seq = Sequence('')
all_inputs = (u'\x80', u'abc\x80', u'\x80abc')
for input_ in all_inputs:
with six.assertRaisesRegex(self, UnicodeEncodeError,
"'ascii' codec can't encode character"
".*in position.*: ordinal not in"
" range\(128\)"):
seq._munge_to_bytestring(input_, 'dummy_method')
# NOTE: this must be a *separate* class for doctests only (no unit tests). nose
# will not run the unit tests otherwise
#
# these doctests exercise the correct formatting of Sequence's repr in a
# variety of situations. they are more extensive than the unit tests above
# (TestSequence.test_repr) but are only currently run in py2. thus, they cannot
# be relied upon for coverage (the unit tests take care of this)
class SequenceReprDoctests(object):
r"""
>>> import pandas as pd
>>> from skbio import Sequence
Empty (minimal) sequence:
>>> Sequence('')
Sequence
-------------
Stats:
length: 0
-------------
Single character sequence:
>>> Sequence('G')
Sequence
-------------
Stats:
length: 1
-------------
0 G
Multicharacter sequence:
>>> Sequence('ACGT')
Sequence
-------------
Stats:
length: 4
-------------
0 ACGT
Full single line:
>>> Sequence('A' * 60)
Sequence
-------------------------------------------------------------------
Stats:
length: 60
-------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
Full single line with 1 character overflow:
>>> Sequence('A' * 61)
Sequence
--------------------------------------------------------------------
Stats:
length: 61
--------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 A
Two full lines:
>>> Sequence('T' * 120)
Sequence
--------------------------------------------------------------------
Stats:
length: 120
--------------------------------------------------------------------
0 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
60 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
Two full lines with 1 character overflow:
>>> Sequence('T' * 121)
Sequence
---------------------------------------------------------------------
Stats:
length: 121
---------------------------------------------------------------------
0 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
60 TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT TTTTTTTTTT
120 T
Five full lines (maximum amount of information):
>>> Sequence('A' * 300)
Sequence
---------------------------------------------------------------------
Stats:
length: 300
---------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
120 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
180 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
Six lines starts "summarized" output:
>>> Sequence('A' * 301)
Sequence
---------------------------------------------------------------------
Stats:
length: 301
---------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
240 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
300 A
A naive algorithm would assume the width of the first column (noting
position) based on the sequence's length alone. This can be off by one if
the last position (in the last line) has a shorter width than the width
calculated from the sequence's length. This test case ensures that only a
single space is inserted between position 99960 and the first sequence
chunk:
>>> Sequence('A' * 100000)
Sequence
-----------------------------------------------------------------------
Stats:
length: 100000
-----------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
The largest sequence that can be displayed using six chunks per line:
>>> Sequence('A' * 100020)
Sequence
-----------------------------------------------------------------------
Stats:
length: 100020
-----------------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
60 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99900 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
99960 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
A single character longer than the previous sequence causes the optimal
number of chunks per line to be 5:
>>> Sequence('A' * 100021)
Sequence
-------------------------------------------------------------
Stats:
length: 100021
-------------------------------------------------------------
0 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
50 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
...
99950 AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA
100000 AAAAAAAAAA AAAAAAAAAA A
Wide range of characters (locale-independent):
>>> import string
>>> Sequence((string.ascii_letters + string.punctuation + string.digits +
... 'a space') * 567)
Sequence
-----------------------------------------------------------------------
Stats:
length: 57267
-----------------------------------------------------------------------
0 abcdefghij klmnopqrst uvwxyzABCD EFGHIJKLMN OPQRSTUVWX YZ!"#$%&'(
60 )*+,-./:;< =>?@[\]^_` {|}~012345 6789a spac eabcdefghi jklmnopqrs
...
57180 opqrstuvwx yzABCDEFGH IJKLMNOPQR STUVWXYZ!" #$%&'()*+, -./:;<=>?@
57240 [\]^_`{|}~ 0123456789 a space
Supply horrendous metadata and positional metadata to exercise a variety of
metadata formatting cases and rules. Sorting should be by type, then by
value within each type (Python 3 doesn't allow sorting of mixed types):
>>> metadata = {
... # str key, str value
... 'abc': 'some description',
... # int value
... 'foo': 42,
... # unsupported type (dict) value
... 'bar': {},
... # int key, wrapped str (single line)
... 42: 'some words to test text wrapping and such... yada yada yada '
... 'yada yada yada yada yada.',
... # bool key, wrapped str (multi-line)
... True: 'abc ' * 34,
... # float key, truncated str (too long)
... 42.5: 'abc ' * 200,
... # unsupported type (tuple) key, unsupported type (list) value
... ('foo', 'bar'): [1, 2, 3],
... # bytes key, single long word that wraps
... b'long word': 'abc' * 30,
... # truncated key (too long), None value
... 'too long of a key name to display in repr': None,
... # wrapped bytes value (has b'' prefix)
... 'bytes wrapped value': b'abcd' * 25,
... # float value
... 0.1: 99.9999,
... # bool value
... 43: False,
... # None key, complex value
... None: complex(-1.0, 0.0),
... # nested quotes
... 10: '"\''
... }
>>> positional_metadata = pd.DataFrame.from_items([
... # str key, int list value
... ('foo', [1, 2, 3, 4]),
... # float key, float list value
... (42.5, [2.5, 3.0, 4.2, -0.00001]),
... # int key, object list value
... (42, [[], 4, 5, {}]),
... # truncated key (too long), bool list value
... ('abc' * 90, [True, False, False, True]),
... # None key
... (None, range(4))])
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
-----------------------------------------------------------------------
Metadata:
None: (-1+0j)
True: 'abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc abc abc abc abc abc abc abc abc abc abc abc
abc abc abc abc '
b'long word': 'abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabca
bcabcabcabcabcabcabcabcabcabcabcabcabc'
0.1: 99.9999
42.5: <class 'str'>
10: '"\''
42: 'some words to test text wrapping and such... yada yada yada
yada yada yada yada yada.'
43: False
'abc': 'some description'
'bar': <class 'dict'>
'bytes wrapped value': b'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcd'
'foo': 42
<class 'str'>: None
<class 'tuple'>: <class 'list'>
Positional metadata:
'foo': <dtype: int64>
42.5: <dtype: float64>
42: <dtype: object>
<class 'str'>: <dtype: bool>
None: <dtype: int64>
Stats:
length: 4
-----------------------------------------------------------------------
0 ACGT
"""
pass
if __name__ == "__main__":
main()
| bsd-3-clause |
kagayakidan/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/centered_ticklabels.py | 6 | 1355 | # sometimes it is nice to have ticklabels centered. mpl currently
# associates a label with a tick, and the label can be aligned
# 'center', 'left', or 'right' using the horizontal alignment property:
#
#
# for label in ax.get_xticklabels():
# label.set_horizontalalignment('right')
#
#
# but this doesn't help center the label between ticks. One solution
# is to "face it". Use the minor ticks to place a tick centered
# between the major ticks. Here is an example that labels the months,
# centered between the ticks
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
# load some financial data; apple's stock price
fh = cbook.get_sample_data('aapl.npy.gz')
r = np.load(fh); fh.close()
r = r[-250:] # get the last 250 days
fig, ax = plt.subplots()
ax.plot(r.date, r.adj_close)
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_minor_locator(dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
for tick in ax.xaxis.get_minor_ticks():
tick.tick1line.set_markersize(0)
tick.tick2line.set_markersize(0)
tick.label1.set_horizontalalignment('center')
imid = len(r) // 2
ax.set_xlabel(str(r.date[imid].year))
plt.show()
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/ae7d4d6bcae82f99a78c3f8a0c94f7b0/plot_mne_inverse_envelope_correlation.py | 3 | 4522 | """
.. _ex-envelope-correlation:
=============================================
Compute envelope correlations in source space
=============================================
Compute envelope correlations of orthogonalized activity [1]_ [2]_ in source
space using resting state CTF data.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Sheraz Khan <sheraz@khansheraz.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.connectivity import envelope_correlation
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
data_path = mne.datasets.brainstorm.bst_resting.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'bst_resting'
trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')
src = op.join(subjects_dir, subject, 'bem', subject + '-oct-6-src.fif')
bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')
raw_fname = op.join(data_path, 'MEG', 'bst_resting',
'subj002_spontaneous_20111102_01_AUX.ds')
##############################################################################
# Here we do some things in the name of speed, such as crop (which will
# hurt SNR) and downsample. Then we compute SSP projectors and apply them.
raw = mne.io.read_raw_ctf(raw_fname, verbose='error')
raw.crop(0, 60).pick_types(meg=True, eeg=False).load_data().resample(80)
raw.apply_gradient_compensation(3)
projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)
projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')
raw.info['projs'] += projs_ecg
raw.info['projs'] += projs_eog
raw.apply_proj()
cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest
##############################################################################
# Now we band-pass filter our data and create epochs.
raw.filter(14, 30)
events = mne.make_fixed_length_events(raw, duration=5.)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,
baseline=None, reject=dict(mag=8e-13), preload=True)
del raw
##############################################################################
# Compute the forward and inverse
# -------------------------------
src = mne.read_source_spaces(src)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
inv = make_inverse_operator(epochs.info, fwd, cov)
del fwd, src
##############################################################################
# Compute label time series and do envelope correlation
# -----------------------------------------------------
labels = mne.read_labels_from_annot(subject, 'aparc_sub',
subjects_dir=subjects_dir)
epochs.apply_hilbert() # faster to apply in sensor space
stcs = apply_inverse_epochs(epochs, inv, lambda2=1. / 9., pick_ori='normal',
return_generator=True)
label_ts = mne.extract_label_time_course(
stcs, labels, inv['src'], return_generator=True)
corr = envelope_correlation(label_ts, verbose=True)
# let's plot this matrix
fig, ax = plt.subplots(figsize=(4, 4))
ax.imshow(corr, cmap='viridis', clim=np.percentile(corr, [5, 95]))
fig.tight_layout()
##############################################################################
# Compute the degree and plot it
# ------------------------------
# sphinx_gallery_thumbnail_number = 2
threshold_prop = 0.15 # percentage of strongest edges to keep in the graph
degree = mne.connectivity.degree(corr, threshold_prop=threshold_prop)
stc = mne.labels_to_stc(labels, degree)
stc = stc.in_label(mne.Label(inv['src'][0]['vertno'], hemi='lh') +
mne.Label(inv['src'][1]['vertno'], hemi='rh'))
brain = stc.plot(
clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',
subjects_dir=subjects_dir, views='dorsal', hemi='both',
smoothing_steps=25, time_label='Beta band')
##############################################################################
# References
# ----------
# .. [1] Hipp JF, Hawellek DJ, Corbetta M, Siegel M, Engel AK (2012)
# Large-scale cortical correlation structure of spontaneous
# oscillatory activity. Nature Neuroscience 15:884–890
# .. [2] Khan S et al. (2018). Maturation trajectories of cortical
# resting-state networks depend on the mediating frequency band.
# Neuroimage 174:57–68
| bsd-3-clause |
dsquareindia/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
lindemann09/pyForceDAQ | forceDAQ/data_handling/read_force_data.py | 1 | 2147 | """
Functions to read your force and event data
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
from collections import OrderedDict
import numpy as np
TAG_COMMENTS = "#"
TAG_UDPDATA = TAG_COMMENTS + "UDP"
TAG_DAQEVENTS = TAG_COMMENTS + "T"
def _csv(line):
return list(map(lambda x: x.strip(), line.split(",")))
def DataFrameDict(data, varnames):
"""data frame: Dict of numpy arrays
does not require Pandas, but can be easily converted to pandas dataframe
via pandas.DataFrame(data_frame_dict)
"""
rtn = OrderedDict()
for v in varnames:
rtn[v] = []
for row in data:
for v, d in zip(varnames, row):
rtn[v].append(d)
return rtn
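# A minimal usage sketch of the conversion mentioned in the docstring above;
# the column names and values here are hypothetical:
#
#     import pandas as pd
#     frame = DataFrameDict([["0.0", "1.25"], ["0.5", "1.30"]], ["time", "Fx"])
#     df = pd.DataFrame(frame)  # two rows, columns "time" and "Fx"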
def data_frame_to_text(data_frame):
rtn = ",".join(data_frame.keys())
rtn += "\n"
for x in np.array(list(data_frame.values())).T:
rtn += ",".join(x) + "\n"
return rtn
def read_raw_data(path):
"""reading trigger and udp data
Returns: data, udp_event, daq_events and comments
data, udp_event, daq_events: DataFrameDict
comments: text string
"""
daq_events = []
udp_events = []
comments = ""
data = []
varnames = None
app_dir = os.path.split(sys.argv[0])[0]
path = os.path.abspath(os.path.join(app_dir, path))
if path.endswith("gz"):
fl = gzip.open(path, "rt")
else:
fl = open(path, "rt")
for ln in fl:
if ln.startswith(TAG_COMMENTS):
comments += ln
if ln.startswith(TAG_UDPDATA + ","):
udp_events.append(_csv(ln[len(TAG_UDPDATA) + 1:]))
elif ln.startswith(TAG_DAQEVENTS):
daq_events.append(_csv(ln[len(TAG_DAQEVENTS) + 1:]))
else:
# data
if varnames is None:
# first row contains varnames
varnames = _csv(ln)
else:
data.append(_csv(ln))
fl.close()
return (DataFrameDict(data, varnames),
DataFrameDict(udp_events, ["time", "value"]),
DataFrameDict(daq_events, ["time", "value"]),
comments)
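# Minimal usage sketch for this module; the file name below is hypothetical and
# only illustrates the call pattern and the shape of the returned values:
if __name__ == "__main__":
    data, udp_events, daq_events, comments = read_raw_data("force_recording.csv.gz")
    print(comments)
    print("data columns:", list(data.keys()))
    print("udp events:", len(udp_events["time"]))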
| mit |
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py | 1 | 4546 | import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
        return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
 | bsd-3-clause |
phobson/statsmodels | statsmodels/datasets/randhie/data.py | 3 | 2650 | """RAND Health Insurance Experiment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is in the public domain."""
TITLE = __doc__
SOURCE = """
The data was collected by the RAND corporation as part of the Health
Insurance Experiment (HIE).
http://www.rand.org/health/projects/hie.html
This data was used in::
    Cameron, A.C. and Trivedi, P.K. 2005. `Microeconometrics: Methods
and Applications,` Cambridge: New York.
And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html>
See randhie/src for the original data and description. The data included
here contains only a subset of the original data. The data varies slightly
compared to that reported in Cameron and Trivedi.
"""
DESCRSHORT = """The RAND Co. Health Insurance Experiment Data"""
DESCRLONG = """"""
NOTE = """::
Number of observations - 20,190
Number of variables - 10
Variable name definitions::
mdvis - Number of outpatient visits to an MD
    lncoins - ln(coinsurance + 1), 0 <= coinsurance <= 100
idp - 1 if individual deductible plan, 0 otherwise
lpi - ln(max(1, annual participation incentive payment))
fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise
physlm - 1 if the person has a physical limitation
disea - number of chronic diseases
hlthg - 1 if self-rated health is good
hlthf - 1 if self-rated health is fair
hlthp - 1 if self-rated health is poor
(Omitted category is excellent self-rated health)
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv')
def load():
"""
    Loads the RAND HIE data and returns a Dataset class.
    Returns
    -------
    Dataset instance:
        a class of the data with array attributes 'endog' (the response
        variable, mdvis) and 'exog' (the design matrix)
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
    Loads the RAND HIE data and returns a Dataset class with pandas objects.
    Returns
    -------
    Dataset instance:
        a class of the data with pandas attributes 'endog' (the response
        variable, mdvis) and 'exog' (the design matrix)
"""
from pandas import read_csv
data = read_csv(PATH)
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
with open(PATH, "rb") as f:
data = recfromtxt(f, delimiter=",", names=True, dtype=float)
return data
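# A short usage sketch, assuming the Dataset returned by load_pandas exposes
# pandas 'endog'/'exog' attributes as in the other statsmodels dataset modules:
if __name__ == "__main__":
    dataset = load_pandas()
    print(dataset.endog.head())
    print(dataset.exog.head())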
| bsd-3-clause |
pr-omethe-us/PyKED | pyked/chemked.py | 1 | 44185 | """
Main ChemKED module
"""
# Standard libraries
from os.path import exists
from collections import namedtuple
from warnings import warn
from copy import deepcopy
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
from itertools import chain
import numpy as np
# Local imports
from .validation import schema, OurValidator, yaml, Q_
from .converters import datagroup_properties, ReSpecTh_to_ChemKED
VolumeHistory = namedtuple('VolumeHistory', ['time', 'volume'])
VolumeHistory.__doc__ = 'Time history of the volume in an RCM experiment. Deprecated, to be removed after PyKED 0.4' # noqa: E501
VolumeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
VolumeHistory.volume.__doc__ = '(`~numpy.ndarray`): the volume during the experiment'
TimeHistory = namedtuple('TimeHistory', ['time', 'quantity', 'type'])
TimeHistory.__doc__ = 'Time history of the quantity in an RCM experiment'
TimeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
TimeHistory.quantity.__doc__ = '(`~numpy.ndarray`): the quantity of interest during the experiment'
TimeHistory.type.__doc__ = """\
(`str`): the type of time history represented. Possible options are:
* volume
* temperature
* pressure
* piston position
* light emission
* OH emission
* absorption
"""
RCMData = namedtuple(
'RCMData',
['compressed_pressure', 'compressed_temperature', 'compression_time', 'stroke',
'clearance', 'compression_ratio']
)
RCMData.__doc__ = 'Data fields specific to rapid compression machine experiments'
RCMData.compressed_pressure.__doc__ = '(`~pint.Quantity`) The pressure at the end of compression'
RCMData.compressed_temperature.__doc__ = """\
(`~pint.Quantity`) The temperature at the end of compression"""
RCMData.compression_time.__doc__ = '(`~pint.Quantity`) The duration of the compression stroke'
RCMData.stroke.__doc__ = '(`~pint.Quantity`) The length of the stroke'
RCMData.clearance.__doc__ = """\
(`~pint.Quantity`) The clearance between piston face and end wall at the end of compression"""
RCMData.compression_ratio.__doc__ = '(`~pint.Quantity`) The volumetric compression ratio'
Reference = namedtuple('Reference',
['volume', 'journal', 'doi', 'authors', 'detail', 'year', 'pages'])
Reference.__doc__ = 'Information about the article or report where the data can be found'
Reference.volume.__doc__ = '(`str`) The journal volume'
Reference.journal.__doc__ = '(`str`) The name of the journal'
Reference.doi.__doc__ = '(`str`) The Digital Object Identifier of the article'
Reference.authors.__doc__ = '(`list`) The list of authors of the article'
Reference.detail.__doc__ = '(`str`) Detail about where the data can be found in the article'
Reference.year.__doc__ = '(`str`) The year the article was published'
Reference.pages.__doc__ = '(`str`) The pages in the journal where the article was published'
Apparatus = namedtuple('Apparatus', ['kind', 'institution', 'facility'])
Apparatus.__doc__ = 'Information about the experimental apparatus used to generate the data'
Apparatus.kind.__doc__ = '(`str`) The kind of experimental apparatus'
Apparatus.institution.__doc__ = '(`str`) The institution where the experiment is located'
Apparatus.facility.__doc__ = '(`str`) The particular experimental facility at the location'
Composition = namedtuple('Composition', 'species_name InChI SMILES atomic_composition amount')
Composition.__doc__ = 'Detail of the initial composition of the mixture for the experiment'
Composition.species_name.__doc__ = '(`str`) The name of the species'
Composition.InChI.__doc__ = '(`str`) The InChI identifier for the species'
Composition.SMILES.__doc__ = '(`str`) The SMILES identifier for the species'
Composition.atomic_composition.__doc__ = '(`dict`) The atomic composition of the species'
Composition.amount.__doc__ = '(`~pint.Quantity`) The amount of this species'
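# A minimal sketch (hypothetical values) of the Composition records built while
# parsing a datapoint; ``amount`` is stored as a pint quantity via ``Q_``:
#
#     h2 = Composition(species_name='H2', InChI='1S/H2/h1H', SMILES='[HH]',
#                      atomic_composition={'H': 2}, amount=Q_(0.12, 'dimensionless'))
#     h2.amount.magnitude  # -> 0.12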
class ChemKED(object):
"""Main ChemKED class.
The ChemKED class stores information about the contents of a ChemKED database
file. It stores each datapoint associated with the database and provides access
the the reference information, versions, and file author.
Arguments:
yaml_file (`str`, optional): The filename of the YAML database in ChemKED format.
dict_input (`dict`, optional): A dictionary with the parsed ouput of YAML file in ChemKED
format.
skip_validation (`bool`, optional): Whether validation of the ChemKED should be done. Must
be supplied as a keyword-argument.
Attributes:
datapoints (`list`): List of `DataPoint` objects storing each datapoint in the database.
reference (`~collections.namedtuple`): Attributes include ``volume``, ``journal``, ``doi``,
``authors``, ``detail``, ``year``, and ``pages`` describing the reference from which the
datapoints are derived.
apparatus (`~collections.namedtuple`): Attributes include ``kind`` of experimental
apparatus, and the ``institution`` and ``facility`` where the experimental apparatus is
located.
chemked_version (`str`): Version of the ChemKED database schema used in this file.
        experiment_type (`str`): Type of experimental data contained in this database.
        file_authors (`list`): Information about the authors of the ChemKED database file.
file_version (`str`): Version of the ChemKED database file.
_properties (`dict`): Original dictionary read from ChemKED database file, meant for
internal use.
"""
def __init__(self, yaml_file=None, dict_input=None, *, skip_validation=False):
if yaml_file is not None:
with open(yaml_file, 'r') as f:
self._properties = yaml.safe_load(f)
elif dict_input is not None:
self._properties = dict_input
else:
raise NameError("ChemKED needs either a YAML filename or dictionary as input.")
if not skip_validation:
self.validate_yaml(self._properties)
self.datapoints = []
for point in self._properties['datapoints']:
self.datapoints.append(DataPoint(point))
self.reference = Reference(
volume=self._properties['reference'].get('volume'),
journal=self._properties['reference'].get('journal'),
doi=self._properties['reference'].get('doi'),
authors=self._properties['reference'].get('authors'),
detail=self._properties['reference'].get('detail'),
year=self._properties['reference'].get('year'),
pages=self._properties['reference'].get('pages'),
)
self.apparatus = Apparatus(
kind=self._properties['apparatus'].get('kind'),
institution=self._properties['apparatus'].get('institution'),
facility=self._properties['apparatus'].get('facility'),
)
for prop in ['chemked-version', 'experiment-type', 'file-authors', 'file-version']:
setattr(self, prop.replace('-', '_'), self._properties[prop])
@classmethod
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
"""Construct a ChemKED instance directly from a ReSpecTh file.
Arguments:
filename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported
file_author (`str`, optional): File author to be added to the list generated from the
XML file
file_author_orcid (`str`, optional): ORCID for the file author being added to the list
of file authors
Returns:
`ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``.
Examples:
>>> ck = ChemKED.from_respecth('respecth_file.xml')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber',
file_author_orcid='0000-0000-0000-0000')
"""
properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
validate=False)
return cls(dict_input=properties)
def validate_yaml(self, properties):
"""Validate the parsed YAML file for adherance to the ChemKED format.
Arguments:
properties (`dict`): Dictionary created from the parsed YAML file
Raises:
`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose
string contains the errors that are present.
"""
validator = OurValidator(schema)
if not validator.validate(properties):
for key, value in validator.errors.items():
if any(['unallowed value' in v for v in value]):
print(('{key} has an illegal value. Allowed values are {values} and are case '
'sensitive.').format(key=key, values=schema[key]['allowed']))
raise ValueError(validator.errors)
def get_dataframe(self, output_columns=None):
"""Get a Pandas DataFrame of the datapoints in this instance.
Arguments:
output_columns (`list`, optional): List of strings specifying the columns to include
in the output DataFrame. The default is `None`, which outputs all of the
columns. Options include (not case sensitive):
* ``Temperature``
* ``Pressure``
* ``Ignition Delay``
* ``Composition``
* ``Equivalence Ratio``
* ``Reference``
* ``Apparatus``
* ``Experiment Type``
* ``File Author``
* ``File Version``
* ``ChemKED Version``
In addition, specific fields from the ``Reference`` and ``Apparatus`` attributes can
be included by specifying the name after a colon. These options are:
* ``Reference:Volume``
* ``Reference:Journal``
* ``Reference:DOI``
* ``Reference:Authors``
* ``Reference:Detail``
* ``Reference:Year``
* ``Reference:Pages``
* ``Apparatus:Kind``
* ``Apparatus:Facility``
* ``Apparatus:Institution``
Only the first author is printed when ``Reference`` or ``Reference:Authors`` is
selected because the whole author list may be quite long.
Note:
If the Composition is selected as an output type, the composition specified in the
`DataPoint` is used. No attempt is made to convert to a consistent basis; mole fractions
will remain mole fractions, mass fractions will remain mass fractions, and mole percent
will remain mole percent. Therefore, it is possible to end up with more than one type of
composition specification in a given column. However, if the composition is included
in the resulting dataframe, the type of each composition will be specified by the "Kind"
field in each row.
Examples:
>>> df = ChemKED(yaml_file).get_dataframe()
>>> df = ChemKED(yaml_file).get_dataframe(['Temperature', 'Ignition Delay'])
Returns:
`~pandas.DataFrame`: Contains the information regarding each point in the ``datapoints``
attribute
"""
import pandas as pd
valid_labels = [a.replace('_', ' ') for a in self.__dict__
if not (a.startswith('__') or a.startswith('_'))
]
valid_labels.remove('datapoints')
valid_labels.extend(
['composition', 'ignition delay', 'temperature', 'pressure', 'equivalence ratio']
)
ref_index = valid_labels.index('reference')
valid_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
app_index = valid_labels.index('apparatus')
valid_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
species_list = list(set(chain(*[list(d.composition.keys()) for d in self.datapoints])))
if output_columns is None or len(output_columns) == 0:
col_labels = valid_labels
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
else:
output_columns = [a.lower() for a in output_columns]
col_labels = []
for col in output_columns:
if col in valid_labels or col in ['reference', 'apparatus']:
col_labels.append(col)
else:
raise ValueError('{} is not a valid output column choice'.format(col))
if 'composition' in col_labels:
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
if 'reference' in col_labels:
ref_index = col_labels.index('reference')
col_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
if 'apparatus' in col_labels:
app_index = col_labels.index('apparatus')
col_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
data = []
for d in self.datapoints:
row = []
d_species = list(d.composition.keys())
for col in col_labels:
if col in species_list:
if col in d_species:
row.append(d.composition[col].amount)
else:
row.append(Q_(0.0, 'dimensionless'))
elif 'reference' in col or 'apparatus' in col:
split_col = col.split(':')
if split_col[1] == 'authors':
row.append(getattr(getattr(self, split_col[0]), split_col[1])[0]['name'])
else:
row.append(getattr(getattr(self, split_col[0]), split_col[1]))
elif col in ['temperature', 'pressure', 'ignition delay', 'equivalence ratio']:
row.append(getattr(d, col.replace(' ', '_')))
elif col == 'file authors':
row.append(getattr(self, col.replace(' ', '_'))[0]['name'])
elif col == 'Composition:Kind':
row.append(d.composition_type)
else:
row.append(getattr(self, col.replace(' ', '_')))
data.append(row)
col_labels = [a.title() for a in col_labels]
columns = pd.Index(col_labels)
return pd.DataFrame(data=data, columns=columns)
def write_file(self, filename, *, overwrite=False):
"""Write new ChemKED YAML file based on object.
Arguments:
filename (`str`): Filename for target YAML file
overwrite (`bool`, optional): Whether to overwrite file with given name if present.
Must be supplied as a keyword-argument.
Raises:
            `OSError`: If ``filename`` is already present, and ``overwrite`` is not ``True``.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.write_file(new_yaml_file)
"""
# Ensure file isn't already present
if exists(filename) and not overwrite:
raise OSError(filename + ' already present. Specify "overwrite=True" '
'to overwrite, or rename.'
)
with open(filename, 'w') as yaml_file:
yaml.dump(self._properties, yaml_file)
def convert_to_ReSpecTh(self, filename):
"""Convert ChemKED record to ReSpecTh XML file.
This converter uses common information in a ChemKED file to generate a
ReSpecTh XML file. Note that some information may be lost, as ChemKED stores
some additional attributes.
Arguments:
filename (`str`): Filename for output ReSpecTh XML file.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.convert_to_ReSpecTh(xml_file)
"""
root = etree.Element('experiment')
file_author = etree.SubElement(root, 'fileAuthor')
file_author.text = self.file_authors[0]['name']
# right now ChemKED just uses an integer file version
file_version = etree.SubElement(root, 'fileVersion')
major_version = etree.SubElement(file_version, 'major')
major_version.text = str(self.file_version)
minor_version = etree.SubElement(file_version, 'minor')
minor_version.text = '0'
respecth_version = etree.SubElement(root, 'ReSpecThVersion')
major_version = etree.SubElement(respecth_version, 'major')
major_version.text = '1'
minor_version = etree.SubElement(respecth_version, 'minor')
minor_version.text = '0'
# Only ignition delay currently supported
exp = etree.SubElement(root, 'experimentType')
if self.experiment_type == 'ignition delay':
exp.text = 'Ignition delay measurement'
else:
raise NotImplementedError('Only ignition delay type supported for conversion.')
reference = etree.SubElement(root, 'bibliographyLink')
citation = ''
for author in self.reference.authors:
citation += author['name'] + ', '
citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' +
str(self.reference.volume) + ':' + self.reference.pages + '. ' +
self.reference.detail
)
reference.set('preferredKey', citation)
reference.set('doi', self.reference.doi)
apparatus = etree.SubElement(root, 'apparatus')
kind = etree.SubElement(apparatus, 'kind')
kind.text = self.apparatus.kind
common_properties = etree.SubElement(root, 'commonProperties')
# ChemKED objects have no common properties once loaded. Check for properties
# among datapoints that tend to be common
common = []
composition = self.datapoints[0].composition
# Composition type *has* to be the same
composition_type = self.datapoints[0].composition_type
if not all(dp.composition_type == composition_type for dp in self.datapoints):
raise NotImplementedError('Error: ReSpecTh does not support varying composition '
'type among datapoints.'
)
if all([composition == dp.composition for dp in self.datapoints]):
# initial composition is common
common.append('composition')
prop = etree.SubElement(common_properties, 'property')
prop.set('name', 'initial composition')
for species_name, species in composition.items():
component = etree.SubElement(prop, 'component')
species_link = etree.SubElement(component, 'speciesLink')
species_link.set('preferredKey', species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
amount = etree.SubElement(component, 'amount')
amount.set('units', composition_type)
amount.text = str(species.amount.magnitude)
# If multiple datapoints present, then find any common properties. If only
# one datapoint, then composition should be the only "common" property.
if len(self.datapoints) > 1:
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
quantities = [getattr(dp, attribute, False) for dp in self.datapoints]
# All quantities must have the property in question and all the
# values must be equal
if all(quantities) and quantities.count(quantities[0]) == len(quantities):
common.append(prop_name)
prop = etree.SubElement(common_properties, 'property')
prop.set('description', '')
prop.set('name', prop_name)
prop.set('units', str(quantities[0].units))
value = etree.SubElement(prop, 'value')
value.text = str(quantities[0].magnitude)
# Ignition delay can't be common, unless only a single datapoint.
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg1')
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
property_idx = {}
labels = {'temperature': 'T', 'pressure': 'P',
'ignition delay': 'tau', 'pressure rise': 'dP/dt',
}
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
# This can't be hasattr because properties are set to the value None
# if no value is specified in the file, so the attribute always exists
prop_indices = [i for i, dp in enumerate(self.datapoints)
if getattr(dp, attribute) is not None
]
if prop_name in common or not prop_indices:
continue
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', prop_name)
units = str(getattr(self.datapoints[prop_indices[0]], attribute).units)
prop.set('units', units)
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': prop_name, 'units': units}
prop.set('id', idx)
prop.set('label', labels[prop_name])
# Need to handle datapoints with possibly different species in the initial composition
if 'composition' not in common:
for dp in self.datapoints:
for species in dp.composition.values():
# Only add new property for species not already considered
has_spec = any([species.species_name in d.values()
for d in property_idx.values()
])
if not has_spec:
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': species.species_name}
prop.set('id', idx)
prop.set('label', '[' + species.species_name + ']')
prop.set('name', 'composition')
prop.set('units', self.datapoints[0].composition_type)
species_link = etree.SubElement(prop, 'speciesLink')
species_link.set('preferredKey', species.species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
for dp in self.datapoints:
datapoint = etree.SubElement(datagroup, 'dataPoint')
for idx, val in property_idx.items():
# handle regular properties a bit differently than composition
if val['name'] in datagroup_properties:
value = etree.SubElement(datapoint, idx)
quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units'])
value.text = str(quantity.magnitude)
else:
# composition
for item in dp.composition.values():
if item.species_name == val['name']:
value = etree.SubElement(datapoint, idx)
value.text = str(item.amount.magnitude)
# See https://stackoverflow.com/a/16097112 for the None.__ne__
history_types = ['volume_history', 'temperature_history', 'pressure_history',
'piston_position_history', 'light_emission_history',
'OH_emission_history', 'absorption_history']
time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types]
time_histories = list(filter(None.__ne__, time_histories))
if len(self.datapoints) > 1 and len(time_histories) > 1:
raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints '
'with a time history.')
elif len(time_histories) > 0:
for dg_idx, hist in enumerate(time_histories):
if hist.type not in ['volume', 'temperature', 'pressure']:
warn('The time-history type {} is not supported by ReSpecTh for '
'ignition delay experiments'.format(hist.type))
continue
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg{}'.format(dg_idx))
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
# Time history has two properties: time and quantity.
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', 'time')
prop.set('units', str(hist.time.units))
time_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[time_idx] = {'name': 'time'}
prop.set('id', time_idx)
prop.set('label', 't')
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', hist.type)
prop.set('units', str(hist.quantity.units))
quant_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[quant_idx] = {'name': hist.type}
prop.set('id', quant_idx)
prop.set('label', 'V')
for time, quantity in zip(hist.time, hist.quantity):
datapoint = etree.SubElement(datagroup, 'dataPoint')
value = etree.SubElement(datapoint, time_idx)
value.text = str(time.magnitude)
value = etree.SubElement(datapoint, quant_idx)
value.text = str(quantity.magnitude)
ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints]
# All datapoints must have the same ignition target and type
if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types):
# In ReSpecTh files all datapoints must share ignition type
ignition = etree.SubElement(root, 'ignitionType')
if ign_types[0]['target'] in ['pressure', 'temperature']:
ignition.set('target', ign_types[0]['target'][0].upper())
else:
# options left are species
ignition.set('target', self.datapoints[0].ignition_type['target'])
if ign_types[0]['type'] == 'd/dt max extrapolated':
ignition.set('type', 'baseline max intercept from d/dt')
else:
ignition.set('type', self.datapoints[0].ignition_type['type'])
else:
raise NotImplementedError('Different ignition targets or types for multiple datapoints '
'are not supported in ReSpecTh.')
et = etree.ElementTree(root)
et.write(filename, encoding='utf-8', xml_declaration=True)
# now do a "pretty" rewrite
xml = minidom.parse(filename)
xml_string = xml.toprettyxml(indent=' ')
with open(filename, 'w') as f:
f.write(xml_string)
print('Converted to ' + filename)
class DataPoint(object):
"""Class for a single datapoint.
The `DataPoint` class stores the information associated with a single data point in the dataset
parsed from the `ChemKED` YAML input.
Arguments:
properties (`dict`): Dictionary adhering to the ChemKED format for ``datapoints``
Attributes:
composition (`list`): List of dictionaries representing the species and their quantities
ignition_delay (pint.Quantity): The ignition delay of the experiment
temperature (pint.Quantity): The temperature of the experiment
pressure (pint.Quantity): The pressure of the experiment
pressure_rise (pint.Quantity, optional): The amount of pressure rise during the induction
period of a shock tube experiment.
compression_time (pint.Quantity, optional): The compression time for an RCM experiment.
compressed_pressure (pint.Quantity, optional): The pressure at the end of compression for
an RCM experiment.
compressed_temperature (pint.Quantity, optional): The temperature at the end of compression
for an RCM experiment.
first_stage_ignition_delay (pint.Quantity, optional): The first stage ignition delay of the
experiment.
compression_time (pint.Quantity, optional): The compression time for an RCM experiment.
ignition_type (`dict`): Dictionary with the ignition target and type.
volume_history (`~collections.namedtuple`, optional): The volume history of the reactor
during an RCM experiment.
pressure_history (`~collections.namedtuple`, optional): The pressure history of the reactor
during an experiment.
temperature_history (`~collections.namedtuple`, optional): The temperature history of the
reactor during an experiment.
piston_position_history (`~collections.namedtuple`, optional): The piston position history
of the reactor during an RCM experiment.
light_emission_history (`~collections.namedtuple`, optional): The light emission history
of the reactor during an experiment.
OH_emission_history (`~collections.namedtuple`, optional): The OH emission history of the
reactor during an experiment.
absorption_history (`~collections.namedtuple`, optional): The absorption history of the
reactor during an experiment.
"""
value_unit_props = [
'ignition-delay', 'first-stage-ignition-delay', 'temperature', 'pressure',
'pressure-rise',
]
rcm_data_props = [
'compressed-pressure', 'compressed-temperature', 'compression-time', 'stroke', 'clearance',
'compression-ratio'
]
def __init__(self, properties):
for prop in self.value_unit_props:
if prop in properties:
quant = self.process_quantity(properties[prop])
setattr(self, prop.replace('-', '_'), quant)
else:
setattr(self, prop.replace('-', '_'), None)
if 'rcm-data' in properties:
orig_rcm_data = properties['rcm-data']
rcm_props = {}
for prop in self.rcm_data_props:
if prop in orig_rcm_data:
quant = self.process_quantity(orig_rcm_data[prop])
rcm_props[prop.replace('-', '_')] = quant
else:
rcm_props[prop.replace('-', '_')] = None
self.rcm_data = RCMData(**rcm_props)
else:
self.rcm_data = None
self.composition_type = properties['composition']['kind']
composition = {}
for species in properties['composition']['species']:
species_name = species['species-name']
amount = self.process_quantity(species['amount'])
InChI = species.get('InChI')
SMILES = species.get('SMILES')
atomic_composition = species.get('atomic-composition')
composition[species_name] = Composition(
species_name=species_name, InChI=InChI, SMILES=SMILES,
atomic_composition=atomic_composition, amount=amount)
setattr(self, 'composition', composition)
self.equivalence_ratio = properties.get('equivalence-ratio')
self.ignition_type = deepcopy(properties.get('ignition-type'))
if 'time-histories' in properties and 'volume-history' in properties:
raise TypeError('time-histories and volume-history are mutually exclusive')
if 'time-histories' in properties:
for hist in properties['time-histories']:
if hasattr(self, '{}_history'.format(hist['type'].replace(' ', '_'))):
raise ValueError('Each history type may only be specified once. {} was '
'specified multiple times'.format(hist['type']))
time_col = hist['time']['column']
time_units = hist['time']['units']
quant_col = hist['quantity']['column']
quant_units = hist['quantity']['units']
if isinstance(hist['values'], list):
values = np.array(hist['values'])
else:
# Load the values from a file
values = np.genfromtxt(hist['values']['filename'], delimiter=',')
time_history = TimeHistory(
time=Q_(values[:, time_col], time_units),
quantity=Q_(values[:, quant_col], quant_units),
type=hist['type'],
)
setattr(self, '{}_history'.format(hist['type'].replace(' ', '_')), time_history)
if 'volume-history' in properties:
warn('The volume-history field should be replaced by time-histories. '
'volume-history will be removed after PyKED 0.4',
DeprecationWarning)
time_col = properties['volume-history']['time']['column']
time_units = properties['volume-history']['time']['units']
volume_col = properties['volume-history']['volume']['column']
volume_units = properties['volume-history']['volume']['units']
values = np.array(properties['volume-history']['values'])
self.volume_history = VolumeHistory(
time=Q_(values[:, time_col], time_units),
volume=Q_(values[:, volume_col], volume_units),
)
history_types = ['volume', 'temperature', 'pressure', 'piston_position', 'light_emission',
'OH_emission', 'absorption']
for h in history_types:
if not hasattr(self, '{}_history'.format(h)):
setattr(self, '{}_history'.format(h), None)
def process_quantity(self, properties):
"""Process the uncertainty information from a given quantity and return it
"""
quant = Q_(properties[0])
if len(properties) > 1:
unc = properties[1]
uncertainty = unc.get('uncertainty', False)
upper_uncertainty = unc.get('upper-uncertainty', False)
lower_uncertainty = unc.get('lower-uncertainty', False)
uncertainty_type = unc.get('uncertainty-type')
if uncertainty_type == 'relative':
if uncertainty:
quant = quant.plus_minus(float(uncertainty), relative=True)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(float(upper_uncertainty), float(lower_uncertainty))
quant = quant.plus_minus(uncertainty, relative=True)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
elif uncertainty_type == 'absolute':
if uncertainty:
uncertainty = Q_(uncertainty)
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty))
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
else:
raise ValueError('uncertainty-type must be one of "absolute" or "relative"')
return quant
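    # Added illustration (not part of the original PyKED source; the values are
    # hypothetical). ``process_quantity`` expects a list whose first entry is a
    # quantity string and whose optional second entry describes the
    # uncertainty, e.g.
    #
    #     self.process_quantity(['1.5 ms', {'uncertainty-type': 'relative',
    #                                       'uncertainty': 0.1}])
    #
    # returns the equivalent of ``Q_('1.5 ms').plus_minus(0.1, relative=True)``,
    # while a bare ``['1.5 ms']`` comes back as a plain ``Q_('1.5 ms')``.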
def get_cantera_composition_string(self, species_conversion=None):
"""Get the composition in a string format suitable for input to Cantera.
Returns a formatted string no matter the type of composition. As such, this method
is not recommended for end users; instead, prefer the `get_cantera_mole_fraction`
or `get_cantera_mass_fraction` methods.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type of the `DataPoint` is not one of
``'mass fraction'``, ``'mole fraction'``, or ``'mole percent'``
"""
if self.composition_type in ['mole fraction', 'mass fraction']:
factor = 1.0
elif self.composition_type == 'mole percent':
factor = 100.0
else:
raise ValueError('Unknown composition type: {}'.format(self.composition_type))
if species_conversion is None:
comps = ['{!s}:{:.4e}'.format(c.species_name,
c.amount.magnitude/factor) for c in self.composition.values()]
else:
comps = []
for c in self.composition.values():
amount = c.amount.magnitude/factor
idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']]
present = [i in species_conversion for i in idents]
if not any(present):
comps.append('{!s}:{:.4e}'.format(c.species_name, amount))
else:
if len([i for i in present if i]) > 1:
raise ValueError('More than one conversion present for species {}'.format(
c.species_name))
ident = idents[present.index(True)]
species_replacement_name = species_conversion.pop(ident)
comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount))
if len(species_conversion) > 0:
raise ValueError('Unknown species in conversion: {}'.format(species_conversion))
return ', '.join(comps)
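    # Added illustration (hypothetical values, not part of the original
    # source): for a composition containing H2, O2 and Ar, passing
    # ``species_conversion={'1S/H2/h1H': 'h2'}`` makes the loop above match H2
    # by its InChI and emit ``h2:...`` while the other species keep their
    # original names; each matched key is popped, so any key that never matches
    # is left behind and triggers the "Unknown species" ValueError above.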
def get_cantera_mole_fraction(self, species_conversion=None):
"""Get the mole fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mole fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mass fraction'``, the conversion cannot
be done because no molecular weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mole_fraction()
'H2:4.4400e-03, O2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
"""
if self.composition_type == 'mass fraction':
raise ValueError('Cannot get mole fractions from the given composition.\n'
'{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
def get_cantera_mass_fraction(self, species_conversion=None):
"""Get the mass fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mole fraction'`` or
``'mole percent'``, the conversion cannot be done because no molecular
weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mass_fraction()
'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
"""
if self.composition_type in ['mole fraction', 'mole percent']:
raise ValueError('Cannot get mass fractions from the given composition.\n'
                             '{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
| bsd-3-clause |
chen0031/Dato-Core | src/unity/python/graphlab/test/test_sarray_sketch.py | 13 | 11788 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
# from nose import with_setup
# -*- coding: utf-8 -*-
from graphlab.data_structures.sarray import SArray
import pandas as pd
import numpy as np
import unittest
import random
import copy
import os
import math
import shutil
import array
import util
import time
import itertools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArraySketchTest(unittest.TestCase):
def setUp(self):
pass
def __validate_sketch_result(self, sketch, sa, delta = 1E-7):
df = pd.DataFrame(list(sa.dropna()))
pds = pd.Series(list(sa.dropna()))
if (sa.dtype() == int or sa.dtype() == float):
if (len(sa) == 0):
self.assertTrue(math.isnan(sketch.min()))
                self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0.0)
self.assertEquals(sketch.mean(), 0.0)
self.assertEquals(sketch.var(), 0.0)
self.assertEquals(sketch.std(), 0.0)
else:
self.assertEquals(sketch.min(), sa.min())
self.assertEquals(sketch.max(), sa.max())
self.assertEquals(sketch.sum(), sa.sum())
self.assertAlmostEqual(sketch.mean(), sa.dropna().mean(), delta=delta)
self.assertAlmostEqual(sketch.var(), sa.dropna().var(), delta=delta)
self.assertAlmostEqual(sketch.std(), sa.dropna().std(), delta=delta)
self.assertAlmostEqual(sketch.quantile(0.5), df.quantile(0.5)[0], delta=1)
self.assertEqual(sketch.quantile(0), df.quantile(0)[0])
self.assertEqual(sketch.quantile(1), df.quantile(1)[0])
self.assertEqual(sketch.frequent_items(), SArray(pds).sketch_summary().frequent_items())
for item in pds.value_counts().index:
self.assertEqual(sketch.frequency_count(item), pds.value_counts()[item])
self.assertAlmostEqual(sketch.num_unique(), len(sa.unique()), delta=3)
else:
with self.assertRaises(RuntimeError):
sketch.quantile((0.5))
self.assertEqual(sketch.num_undefined(), sa.num_missing())
self.assertEqual(sketch.size(), len(sa))
self.assertEqual(sketch.sketch_ready(), True)
self.assertEqual(sketch.num_elements_processed(), sketch.size())
def __validate_nested_sketch_result(self, sa):
sketch = sa.sketch_summary()
self.__validate_sketch_result(sketch, sa)
# element length summary
t = sketch.element_length_summary()
len_sa = sa.dropna().item_length()
self.__validate_sketch_result(t, len_sa)
def test_sketch_int(self):
int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_sketch_float(self):
        int_data = [1.2, 3, .4, 6.789, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_vector_sketch(self):
vector_data = [[], [1,2], [3], [4,5,6,7], [8,9,10], None]
sa = SArray(data=vector_data)
        sketch = sa.sketch_summary()
self.__validate_sketch_result(sketch, sa)
self.__validate_sketch_result(sketch.element_length_summary(), sa.dropna().item_length())
flattened = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(sketch.element_summary(), SArray(flattened))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 5)
self.assertEqual((fi['[1 2]']), 1)
self.assertEqual((fi['[4 5 6 7]']), 1)
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys = 1).element_sub_sketch(1)
expected = sa.vector_slice(1)
self.__validate_sketch_result(s, expected)
# sub sketch with multiple keys
keys = [1,3]
s = sa.sketch_summary(sub_sketch_keys = keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.vector_slice(key)
self.__validate_sketch_result(s[key], expected)
indexes = range(0,10)
s = sa.sketch_summary(sub_sketch_keys = indexes).element_sub_sketch()
self.assertEqual(len(s), len(indexes))
def test_list_sketch(self):
list_data = [[], [1,2],[1,2], ['a', 'a', 'a', 'b'], [ 1 ,1 , 2], None]
sa = SArray(list_data)
self.__validate_nested_sketch_result(sa)
        sketch = sa.sketch_summary()
self.assertEqual(sketch.num_unique(), 4)
element_summary = sketch.element_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(element_summary, SArray(another_rep, str))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
self.assertEqual((fi['[1,2]']), 2)
self.assertEqual((fi['["a","a","a","b"]']), 1)
def test_dict_sketch_int_value(self):
dict_data = [{}, {'a':1, 'b':2}, {'a':1, 'b':2}, {'a':3, 'c':1}, {'a': 1, 'b': 2, 'c': 3}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
self.assertEqual(sketch.num_unique(), 4)
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
self.assertEqual((fi['{"a":1, "b":2}']), 2)
self.assertEqual((fi['{"a":3, "c":1}']), 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_dict_sketch_str_value(self):
# Dict value sketch type should be auto inferred
dict_data = [{'a':'b', 'b':'c'}, {'a':'b', 'b':'c'}, {'a':'d', 'b':'4'}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
fi = sketch.frequent_items()
self.assertEqual(len(fi), 2)
self.assertEqual(fi['{"a":"b", "b":"c"}'], 2)
self.assertEqual(fi['{"a":"d", "b":"4"}'], 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
# allow pass in empty keys, which will retrieve all keys
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch()
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_str_sketch(self):
str_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", None]
sa = SArray(data=str_data)
sketch = sa.sketch_summary()
with self.assertRaises(RuntimeError):
sketch.min()
with self.assertRaises(RuntimeError):
sketch.max()
with self.assertRaises(RuntimeError):
sketch.sum()
with self.assertRaises(RuntimeError):
sketch.mean()
with self.assertRaises(RuntimeError):
sketch.var()
with self.assertRaises(RuntimeError):
sketch.std()
self.assertAlmostEqual(sketch.num_unique(), 10, delta=3)
self.assertEqual(sketch.num_undefined(), 1)
self.assertEqual(sketch.size(), len(str_data))
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
self.assertEqual(sketch.frequency_count("1"), 1)
self.assertEqual(sketch.frequency_count("2"), 1)
t = sketch.frequent_items()
self.assertEqual(len(t), 10)
def test_empty_sketch(self):
int_data = []
sa = SArray(data=int_data)
sketch = sa.sketch_summary()
self.assertTrue(math.isnan(sketch.min()))
self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0)
self.assertEqual(sketch.mean(), 0)
self.assertEqual(sketch.var(), 0)
self.assertEqual(sketch.std(), 0)
self.assertEqual(sketch.num_unique(), 0)
self.assertEqual(sketch.num_undefined(),0)
self.assertEqual(sketch.size(), 0)
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
t = sketch.frequent_items()
self.assertEqual(len(t), 0)
def test_background_sketch(self):
dict_data = [{str(i):1} for i in range(1,10000)]
sa = SArray(dict_data)
        s = sa.sketch_summary(background=True, sub_sketch_keys=[str(i) for i in range(100, 200)])
s.sketch_ready() # cannot check the actual value as it depends on the speed of processing
t = s.element_sub_sketch([str(i) for i in range(100, 105)])
self.assertEqual(len(t), 5)
def test_large_value_sketch(self):
sa = SArray([1234567890 for i in range(100)])
        sk = sa.sketch_summary()
        self.__validate_sketch_result(sk, sa, 1E-5)
def test_cancelation(self):
sa = SArray(range(1,10000))
s = sa.sketch_summary(background=True)
s.cancel()
# this can be rather non-deterministic, so there is very little
# real output validation that can be done...
| agpl-3.0 |
tomlof/scikit-learn | examples/plot_digits_pipe.py | 65 | 1652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
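# Added note (illustrative): the same double-underscore routing works outside
# GridSearchCV as well, e.g. ``pipe.set_params(pca__n_components=30,
# logistic__C=1.0)`` configures the nested PCA and LogisticRegression steps
# directly.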
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
mdhaber/scipy | scipy/optimize/_lsq/least_squares.py | 12 | 39190 | """Generic interface for least-squares minimization."""
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, str) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
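# Added note: `call_minpack` always returns an all-zero ``active_mask`` because
# the 'lm' path does not support bounds, so no constraint can ever be active.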
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
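# Added sketch (illustrative, not part of the original module): scalar bounds
# are broadcast to the size of ``x0`` here, so
#
#     prepare_bounds((0, np.inf), 3)
#
# returns ``(array([0., 0., 0.]), array([inf, inf, inf]))``.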
def check_tolerance(ftol, xtol, gtol, method):
def check(tol, name):
if tol is None:
tol = 0
elif tol < EPS:
warn("Setting `{}` below the machine epsilon ({:.2e}) effectively "
"disables the corresponding termination condition."
.format(name, EPS))
return tol
ftol = check(ftol, "ftol")
xtol = check(xtol, "xtol")
gtol = check(gtol, "gtol")
if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
raise ValueError("All tolerances must be higher than machine epsilon "
"({:.2e}) for method 'lm'.".format(EPS))
elif ftol < EPS and xtol < EPS and gtol < EPS:
raise ValueError("At least one of the tolerances must be higher than "
"machine epsilon ({:.2e}).".format(EPS))
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, str) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
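# Added note: ``group_columns`` bundles structurally non-overlapping columns of
# the sparsity pattern so that `approx_derivative` can perturb several columns
# per function evaluation, which is what makes sparse finite differencing cheap
# (see [Curtis]_ in the `least_squares` docstring below).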
# Loss functions.
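# Each loss below fills, in place, rho[0] = rho(z), rho[1] = drho/dz and
# rho[2] = d2rho/dz2 for z = (f / f_scale)**2; when ``cost_only`` is True only
# rho[0] is needed (see ``construct_loss_function`` and the `loss` parameter
# documentation in `least_squares`).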
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
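# Added sketch (illustrative, not part of the original module): for the
# 'cauchy' loss with ``f_scale=1.0`` and residuals ``f``,
#
#     f = np.array([0.5, 2.0])
#     loss_function = construct_loss_function(f.size, 'cauchy', 1.0)
#     rho = loss_function(f)
#     # rho[0] == np.log1p(f**2); rho[1] == 1 / (1 + f**2)
#     # rho[2] == -1 / (1 + f**2)**2
#     cost = loss_function(f, cost_only=True)   # == 0.5 * np.log1p(f**2).sum()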
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-D real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must allocate and return a 1-D array_like of shape (m,) or a scalar.
If the argument ``x`` is complex or the function ``fun`` returns
complex residuals, it must be wrapped in a real function of real
arguments, as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-D array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
twice as many operations as '2-point' (default). The scheme 'cs'
uses complex steps, and while potentially the most accurate, it is
applicable only when `fun` correctly handles complex inputs and
can be analytically continued to the complex plane. Method 'lm'
always uses the '2-point' scheme. If callable, it is used as
``jac(x, *args, **kwargs)`` and should return a good approximation
(or the exact value) for the Jacobian as an array_like (np.atleast_2d
is applied), a sparse matrix (csr_matrix preferred for performance) or
a `scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float or None, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
If None and 'method' is not 'lm', the termination by this condition is
disabled. If 'method' is 'lm', this tolerance must be higher than
machine epsilon.
xtol : float or None, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
If None and 'method' is not 'lm', the termination by this condition is
disabled. If 'method' is 'lm', this tolerance must be higher than
machine epsilon.
gtol : float or None, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
The exact condition depends on a `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
If None and 'method' is not 'lm', the termination by this condition is
disabled. If 'method' is 'lm', this tolerance must be higher than
machine epsilon.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along jth
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
influence, but may cause difficulties in optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-D ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default), the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally, ``method='trf'`` supports 'regularize' option
(bool, default is True), which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default), then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
result : OptimizeResult
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
        grad : ndarray, shape (n,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is
always the uniform norm of the gradient. In constrained problems,
it is the quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a
sequence of strictly feasible iterates and `active_mask` is
determined within a tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do
not count function calls for numerical Jacobian approximation, as
opposed to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquadt algorithm.
curve_fit : Least-squares minimization applied to a curve-fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on paper [JJMore]_, it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also,
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, thus it is chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e., robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
    .. [NR] William H. Press et al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
    .. [BA] B. Triggs et al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
... rng = default_rng(seed)
...
... y = a + b * np.exp(t * c)
...
... error = noise * rng.standard_normal(t.size)
... outliers = rng.integers(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And, finally, plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-D complex function of n complex
variables we optimize a 2m-D real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like. "
"f0.shape: {0}".format(f0.shape))
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = J0.tocsr()
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs).tocsr()
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
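# Added sketch (illustrative only, not part of the original SciPy module; it
# runs only when this file is executed directly, e.g. via ``python -m``): for
# the default linear loss the returned fields satisfy
# ``cost == 0.5 * fun @ fun`` and ``grad == jac.T @ fun``, matching the
# definitions used above.
if __name__ == "__main__":
    _res = least_squares(lambda x: np.array([x[0] - 1.0, 2.0 * x[1] + 3.0]),
                         np.zeros(2))
    assert np.isclose(_res.cost, 0.5 * np.dot(_res.fun, _res.fun))
    assert np.allclose(_res.grad, _res.jac.T.dot(_res.fun))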
| bsd-3-clause |
chrisburr/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
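# Added note: the three assignments above encode the piecewise definition
# L(y, f(x)) = -4*y*f(x) for y*f(x) < -1, (1 - y*f(x))**2 for -1 <= y*f(x) < 1,
# and 0 for y*f(x) >= 1.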
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
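# Added note: spectral embeddings are built from eigenvectors, which are only
# defined up to a sign, so the helper above accepts either orientation of each
# embedding column when comparing two results.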
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
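    # (Added note) stepping through ``flat`` with stride 2*n_sample + 1 visits
    # exactly the diagonal of the (2*n_sample, 2*n_sample) matrix, so this
    # zeroes the self-affinities before the symmetrization below.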
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
aerokappa/SantaClaus | handCodedOptimum_v4.py | 1 | 2216 | import numpy as np
import pandas as pd
from processInput import processInput
def handCodedOptimum_v4 ( ):
fileName = 'gifts.csv'
giftList, giftListSummary = processInput( fileName )
packedBags = []
for i in np.arange(1000):
        print(i)
currentBag = []
if (i< 333):
itemCount = np.array([0 ,3 ,0 ,0 ,0 ,0 ,0 ,3 ,0])
elif ((i>=333) & (i<458)):
itemCount = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=458) & (i<583)):
itemCount = np.array([0, 0, 0, 0, 0, 0, 8, 0, 0])
elif ((i>=583) & (i<916)):
itemCount = np.array([0, 0, 0, 3, 0, 2, 0, 0, 0])
elif ((i>=916) & (i<924)):
itemCount = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 25])
elif ((i>=924) & (i<928)):
itemCount = np.array([ 0, 23, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=928) & (i<938)):
itemCount = np.array([ 0, 0, 0, 0, 0, 19, 0, 0, 0])
elif ((i>=938) & (i<939)):
itemCount = np.array([ 0, 0, 0, 0, 0, 11, 0, 1, 0])
elif ((i>=939) & (i<940)):
itemCount = np.array([0, 9, 0, 1, 0, 0, 0, 0, 0])
else:
itemCount = np.array([0, 0, 1, 0, 0, 5, 0, 0, 0])
        for k in np.arange(len(itemCount)):
            if (itemCount[k] <= giftListSummary['nGiftsNotPacked'][k]):
                for j in np.arange(itemCount[k]):
                    giftName = giftListSummary['GiftType'][k]
                    currGiftID = giftListSummary['nGiftsPacked'][k]
                    currentBag.append(giftName+'_'+str(currGiftID))
                    giftListSummary['nGiftsPacked'][k] += 1
                    giftListSummary['nGiftsNotPacked'][k] -= 1
packedBags.append(currentBag)
    # Write results to 'submission_5.csv'
subFile = open('submission_5.csv','w')
subFile.write('Gifts\n')
for currentBag in packedBags:
subFile.write(currentBag[0])
for currentItem in currentBag[1:]:
subFile.write(' ')
subFile.write(currentItem)
subFile.write('\n')
subFile.close()
return packedBags | mit |
combust-ml/mleap | python/tests/pyspark/feature/math_binary_test.py | 2 | 6892 | import math
import os
import shutil
import tempfile
import unittest
import mleap.pyspark # noqa
from mleap.pyspark.spark_support import SimpleSparkSerializer # noqa
import pandas as pd
from pandas.testing import assert_frame_equal
from pyspark.ml import Pipeline
from pyspark.sql.types import FloatType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from mleap.pyspark.feature.math_binary import MathBinary
from mleap.pyspark.feature.math_binary import BinaryOperation
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = StructType([
StructField('f1', FloatType()),
StructField('f2', FloatType()),
])
class MathBinaryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spark = spark_session()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def setUp(self):
self.input = self.spark.createDataFrame([
(
float(i),
float(i * 2),
)
for i in range(1, 10)
], INPUT_SCHEMA)
self.expected_add = pd.DataFrame(
[(
float(i + i * 2)
)
for i in range(1, 10)],
columns=['add(f1, f2)'],
)
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def _new_add_math_binary(self):
return MathBinary(
operation=BinaryOperation.Add,
inputA="f1",
inputB="f2",
outputCol="add(f1, f2)",
)
def test_add_math_binary(self):
add_transformer = self._new_add_math_binary()
result = add_transformer.transform(self.input).toPandas()[['add(f1, f2)']]
assert_frame_equal(self.expected_add, result)
def test_math_binary_pipeline(self):
add_transformer = self._new_add_math_binary()
mul_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
inputB="add(f1, f2)",
outputCol="mul(f1, add(f1, f2))",
)
expected = pd.DataFrame(
[(
float(i * (i + i * 2))
)
for i in range(1, 10)],
columns=['mul(f1, add(f1, f2))'],
)
pipeline = Pipeline(
stages=[add_transformer, mul_transformer]
)
pipeline_model = pipeline.fit(self.input)
result = pipeline_model.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
assert_frame_equal(expected, result)
def test_can_instantiate_all_math_binary(self):
for binary_operation in BinaryOperation:
transformer = MathBinary(
operation=binary_operation,
inputA="f1",
inputB="f2",
outputCol="operation",
)
def test_serialize_deserialize_math_binary(self):
add_transformer = self._new_add_math_binary()
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary.zip'))
add_transformer.serializeToBundle(file_path, self.input)
deserialized_math_binary = SimpleSparkSerializer().deserializeFromBundle(file_path)
result = deserialized_math_binary.transform(self.input).toPandas()[['add(f1, f2)']]
assert_frame_equal(self.expected_add, result)
def test_serialize_deserialize_pipeline(self):
add_transformer = self._new_add_math_binary()
mul_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
inputB="add(f1, f2)",
outputCol="mul(f1, add(f1, f2))",
)
expected = pd.DataFrame(
[(
float(i * (i + i * 2))
)
for i in range(1, 10)],
columns=['mul(f1, add(f1, f2))'],
)
pipeline = Pipeline(
stages=[add_transformer, mul_transformer]
)
pipeline_model = pipeline.fit(self.input)
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary_pipeline.zip'))
pipeline_model.serializeToBundle(file_path, self.input)
deserialized_pipeline = SimpleSparkSerializer().deserializeFromBundle(file_path)
        result = deserialized_pipeline.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
assert_frame_equal(expected, result)
def test_add_math_binary_defaults_none(self):
add_transformer = self._new_add_math_binary()
none_df = self.spark.createDataFrame([
(None, float(i * 2))
for i in range(1, 3)
], INPUT_SCHEMA)
# Summing None + int yields Nones
expected_df = pd.DataFrame([
(None,)
for i in range(1, 3)
], columns=['add(f1, f2)'])
result = add_transformer.transform(none_df).toPandas()[['add(f1, f2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_inputA(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputB="f2",
outputCol="mult(1, f2)",
defaultA=1.0,
)
none_df = self.spark.createDataFrame([
(None, float(i * 1234))
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(i * 1234), )
for i in range(1, 3)
], columns=['mult(1, f2)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(1, f2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_inputB(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
outputCol="mult(f1, 2)",
defaultB=2.0,
)
none_df = self.spark.createDataFrame([
(float(i * 1234), None)
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(i * 1234 * 2), )
for i in range(1, 3)
], columns=['mult(f1, 2)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(f1, 2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_both(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
outputCol="mult(7, 8)",
defaultA=7.0,
defaultB=8.0,
)
none_df = self.spark.createDataFrame([
(None, None)
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(7 * 8), )
for i in range(1, 3)
], columns=['mult(7, 8)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(7, 8)']]
assert_frame_equal(expected_df, result)
| apache-2.0 |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/core/sorting.py | 6 | 15948 | """ miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.dtypes.common import (
_ensure_platform_int,
_ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
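    For example (a minimal sketch with two label arrays, two levels each):
        get_group_index([np.array([0, 1, 1]), np.array([0, 0, 1])],
                        shape=[2, 2], sort=True, xnull=False)
        # -> array([0, 2, 3]); one id per observed label combination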
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
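# For example, decons_group_index(np.array([0, 2, 3]), shape=[2, 2]) recovers
# [array([0, 1, 1]), array([0, 0, 1])], i.e. the inverse of the
# get_group_index() sketch above (illustrative only).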
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
from pandas.core.categorical import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
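    For example (a minimal sketch):
        nargsort([3.0, np.nan, 1.0], ascending=True, na_position='last')
        # -> array([2, 0, 1]); the NaN index is pushed to the end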
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
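    For example (a minimal sketch):
        compress_group_index(np.array([5, 1, 5, 3]), sort=True)
        # -> (array([2, 0, 2, 1]), array([1, 3, 5]))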
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
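    For example (a minimal sketch, Python 3, mixed int/str values):
        safe_sort(np.array(['b', 1, 0, 'a'], dtype=object),
                  labels=[0, 1, 2, 3, -1])
        # -> (array([0, 1, 'a', 'b'], dtype=object), array([3, 1, 0, 2, -1]))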
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = _ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = _ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, _ensure_platform_int(new_labels)
| mit |
WilliamDiakite/ExperimentationsACA | processing/lsa.py | 1 | 3364 |
import os
import sys
import itertools
import operator
import nltk
import numpy as np
import matplotlib.pyplot as plt
from nltk.util import ngrams
from collections import Counter
from spell_checker import SpellChecker
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
sys.path.insert(0, '/Users/diakite_w/Documents/Dev/ExperimentationsACA/FrenchLefffLemmatizer')
from FrenchLefffLemmatizer import FrenchLefffLemmatizer
def extract_ngrams(documents, n):
'''
    Return a Counter of n-grams
'''
chained_documents = list(itertools.chain.from_iterable(documents))
return Counter(ngrams(chained_documents, n))
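# For example, extract_ngrams([['the', 'cat', 'sat']], 2) returns
# Counter({('the', 'cat'): 1, ('cat', 'sat'): 1}). Note that the documents
# are chained together first, so n-grams can span adjacent documents.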
def tokenize(text):
fll = FrenchLefffLemmatizer()
splck = SpellChecker()
contracted_pronouns = ["l'", "m'", "n'", "d'", "c'", "j'", "qu'", "s'"]
dictionnary = []
stopwords = [w.rstrip() for w in open('stopwords-fr.txt')]
# Put everything to lower case
text = text.lower()
# Tokenize text
tokens = nltk.tokenize.word_tokenize(text)
    print('Number of tokens in the text:', len(tokens))
#tokens = [splck.correct(t) if t not in dictionnary else t for t in tokens]
# Remove contacted pronous from tokens
tokens = [t[2:] if t[:2] in contracted_pronouns else t for t in tokens]
tokens = [t for t in tokens if len(t) > 2]
tokens = [t for t in tokens if t not in stopwords]
tokens = [fll.lemmatize(t) for t in tokens]
    print('Number of tokens after processing:', len(tokens), '\n')
return tokens
def tokens_to_vec(tokens):
vec = np.zeros(len(word_index_map))
for token in tokens:
idx = word_index_map[token]
vec[idx] = 1
return vec
def read_txt(textfile):
with open(textfile, 'r') as f:
text = f.read()
text = text.replace('\n', ' ')
text = text.replace('- ', '')
text = text.replace('.', '')
text = text.replace('-', '')
text = text.replace("‘l'", 'ï')
return text
def get_all_doc(directory):
'''
Read all txt documents and append them in string
'''
documents = []
counter = 1
for filename in os.listdir(directory):
if filename.endswith('.txt'):
print('\n[...] Reading document', counter)
filename = 'data/' + filename
documents.append(read_txt(filename))
counter += 1
return documents
documents = get_all_doc('data/')
all_tokens = [tokenize(doc) for doc in documents]
vocabulary = list(set(itertools.chain.from_iterable(all_tokens)))
print ('\nVocab size:', len(vocabulary))
# Computing n-grams
bigrams = extract_ngrams(all_tokens, 2)
trigrams = extract_ngrams(all_tokens, 3)
[print(t) for t in trigrams.most_common(5)]
print('\n')
[print(t) for t in bigrams.most_common(10)]
'''
# Key: word - value: index
word_index_map = {j: i for i, j in enumerate(vocabulary)}
# Key: index - value: word
index_word_map = sorted(word_index_map.items(), key=operator.itemgetter(1))
index_word_map = [t[0] for t in index_word_map]
N = len(documents)
D = len(word_index_map)
X = np.zeros((D,N))
i = 0
for tokens in all_tokens:
X[:,i] = tokens_to_vec(tokens)
i += 1
print(X.shape)
svd = TruncatedSVD()
Z = svd.fit_transform(X)
print('Z shape', Z.shape)
plt.scatter(Z[:,0], Z[:,1])
print('D:', D)
for i in range(D):
plt.annotate(s=index_word_map[i], xy=(Z[i,0], Z[i,1]))
plt.show()
'''
| mit |
JaviMerino/lisa | libs/utils/analysis/frequency_analysis.py | 1 | 24894 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
avg_lfreq = 0
if len(lfreq) > 0:
lfreq['timestamp'] = lfreq.index
lfreq['delta'] = (lfreq['timestamp'] -lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
:type clusters: str ot list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
          cpu_active[t] == 1 if the CPU is reported to be non-idle by
                              the cpu_idle events at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
          cluster_active[t] == 1 if at least one CPU is reported to be
                              non-idle by the cpu_idle events at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
logging.warn('Cluster frequency is NOT coherent,'
'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
        #     cluster_active[t] == 1 if at least one CPU is reported to be
        #                            non-idle by the cpu_idle events at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
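        # For example (sketch): with cluster_active == [1, 1, 0, 1] and a
        # frequency trace of [f0, f1, f1, f1], freq_active for f1 is
        # [0, 1, 1, 1] and the product [0, 1, 0, 1] picks out the samples
        # where the cluster is both active and running at f1.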
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
:param xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_first: if True this is the last plot
:type is_first: bool
:param res_type: type of residency, either TOTAL or ACTIVE
:type title: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies:
:type residencies: namedtuple(ResidencyData) - tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
pl.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
pglomski/shopnotes | drill_speed_chart.py | 1 | 2778 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
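# The curve below is rpm = 12 * speed / (pi * x): `speed` is taken as a
# cutting (surface) speed in feet per minute and x is the bit diameter in
# inches, with the factor of 12 converting feet to inches.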
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
| agpl-3.0 |
raghavrv/scikit-learn | examples/decomposition/plot_pca_iris.py | 49 | 1511 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral,
edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm, which does not assume any particular form for
the data distribution, is applied to both examples for comparison.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
LarsDu/DeepNuc | deepnuc/nucbinaryclassifier.py | 2 | 15464 | import tensorflow as tf
import numpy as np
import sklearn.metrics as metrics
#from databatcher import DataBatcher
import nucconvmodel
#import dubiotools as dbt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pprint
from itertools import cycle
import os
import sys
#Logging imports
from logger import Logger
from nucinference import NucInference
from collections import OrderedDict
class NucBinaryClassifier(NucInference):
use_onehot_labels = True
def __init__(self,
sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob=0.5,
beta1=0.9,
concat_revcom_input=False,
nn_method_key="inferenceA",
pos_index=1):
"""NucBinaryClassifier encapsulates training and data
evaluation for
:param sess: tf.Session() object
:param train_batcher: DataBatcher object for training set
:param test_batcher: DataBatcher object for test set
:param num_epochs: Number of epoch cycles to perform training
:param learning_rate: Learning rate
:param batch_size: Mini-batch pull size
:param seq_len: Sequence length
:param save_dir: Root save directory for binary classification model
:param keep_prob: Probability of keeping weight for dropout
regularization
:param beta1: Beta1 parameter for AdamOptimizer
:param concat_revcom_input: If true, concatenate reverse
complement of nucleotide sequence to input vector
:param nn_method_key: Dictionary key for inference
method found in nucconvmodels.py file. Determines which model
to use. Example: "inferenceA" will run nucconvmodels.inferenceA
:param pos_index: The index to use for the positive class
(defaults to 1)
:returns: a NucBinaryClassifier object
:rtype: NucBinaryClassifier
"""
super(NucBinaryClassifier, self).__init__(sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob,
beta1,
concat_revcom_input,
nn_method_key="inferenceA")
        if self.train_batcher.num_classes != 2:
            raise ValueError("NucBinaryClassifier requires exactly two classes; "
                             "found %d in train batcher" % self.train_batcher.num_classes)
        self.num_classes = 2
#The index for the label that should be considered the positive class
self.pos_index=pos_index
self.save_on_epoch = 5
def build_model(self):
self.dna_seq_placeholder = tf.placeholder(tf.float32,
shape=[None,self.seq_len,4],
name="dna_seq")
self.labels_placeholder = tf.placeholder(tf.float32,
shape=[None, self.num_classes],
name="labels")
self.keep_prob_placeholder = tf.placeholder(tf.float32,name="keep_prob")
self.logits, self.network = self.nn_method(self.dna_seq_placeholder,
self.keep_prob_placeholder,
self.num_classes)
self.probs = tf.nn.softmax(self.logits)
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.labels_placeholder,
logits=self.logits))
'''
    Calculate metrics. num_true_positives is the number of true positives for the current batch
Table below shows index if tf.argmax is applied
+-----+-----------+---------+
| | Classifier| Label |
+-----+-----------+---------+
| TP | 1 | 1 |
+-----+-----------+---------+
| FP | 1 | 0 |
+-----+-----------+---------+
| TN | 0 | 0 |
+-----+-----------+---------+
| FN | 0 | 1 |
+-----+-----------+---------+
Precision = TP/(TP+FP)
Recall = TP/(TP+FN)
F1-score = 2*(Prec*Rec)/(Prec+Rec)
    # Note: I ended up not using the tp,fp,tn,fn ops because I ended up calculating
    # these metrics using sklearn (a toy sklearn sketch appears at the end of this module).
'''
#correct = TN+TP #Used for calculating accuracy
self.logits_ind = tf.argmax(self.logits,1)
self.labels_ind = tf.argmax(self.labels_placeholder,1)
#Create max_mask of logits (ie: [-.5,.5] --> [0 1]. Note logits have
# shape [batch_size * num_classes= 2]
#self.inverse_logits_col = tf.ones_like(self.logits_ind) - self.logits_ind
#self.max_mask_logits = tf.concat([self.inverse_logits_col,self.logits_ind],1)
#True positives where logits_ind+labels_ind == 2
#True negatives where logits_ind+labels_ind == 0
self.sum_ind = tf.add(self.logits_ind,self.labels_ind)
self.true_positives = tf.equal(self.sum_ind,2*tf.ones_like(self.sum_ind)) #bool
self.num_true_positives =tf.reduce_sum(tf.cast(self.true_positives, tf.int32))
#For FP classifier index > label index
self.false_positives=tf.greater(self.logits_ind,self.labels_ind)
self.num_false_positives = tf.reduce_sum(tf.cast(self.false_positives, tf.int32))
self.true_negatives = tf.equal(self.sum_ind,tf.zeros_like(self.sum_ind)) #bool
self.num_true_negatives= tf.reduce_sum(tf.cast(self.true_negatives,tf.int32))
#For FN classifier index < label index
self.false_negatives=tf.less(self.logits_ind,self.labels_ind)
self.num_false_negatives = tf.reduce_sum(tf.cast(self.false_negatives,tf.int32))
#num correct can be used to calculate accuracy
self.correct = tf.equal(self.logits_ind,self.labels_ind)
self.num_correct= tf.reduce_sum(tf.cast(self.correct, tf.int32))
self.relevance =self.network.relevance_backprop(tf.multiply(self.logits,
self.labels_placeholder))
'''Write and consolidate summaries'''
self.loss_summary = tf.summary.scalar('loss',self.loss)
self.summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph)
self.summary_op = tf.summary.merge([self.loss_summary])
#Note: Do not use tf.summary.merge_all() here. This will break encapsulation for
# cross validation and lead to crashes when training multiple models
# Add gradient ops to graph with learning rate
self.train_op = tf.train.AdamOptimizer(self.learning_rate,
beta1=self.beta1).minimize(self.loss)
self.vars = tf.trainable_variables()
self.var_names = [var.name for var in self.vars]
#print "Trainable variables:\n"
#for vname in self.var_names:
# print vname
self.saver = tf.train.Saver()
self.init_op = tf.global_variables_initializer()
#Important note: Restoring model does not require init_op.
#In fact calling tf.global_variables_initializer() after loading a model
#will overwrite loaded weights
self.sess.run(self.init_op)
self.load(self.checkpoint_dir)
def eval_model_metrics(self,
batcher,
save_plots=False,
image_name ='metrics.png',
eval_batch_size=50):
"""
Note: This method only works for binary classification
        as auPRC and auROC graphs only apply to binary classification problems.
TODO: Modify this code to perform auROC generation
for one-vs-all in the case of multiclass classification.
"""
#Ref: http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics
##auROC calculations
        #Pull whole batches of eval_batch_size first, then the remaining records one
        #at a time, so that exactly one full epoch is evaluated
all_labels = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
all_probs = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
#num_correct = 0 #counts number of correct predictions
num_whole_pulls = batcher.num_records//eval_batch_size
num_single_pulls = batcher.num_records%eval_batch_size
num_steps = num_whole_pulls+num_single_pulls
for i in range(num_steps):
if i<num_whole_pulls:
batch_size=eval_batch_size
else:
batch_size=1
labels_batch, dna_seq_batch = batcher.pull_batch(batch_size)
feed_dict = {
self.dna_seq_placeholder:dna_seq_batch,
self.labels_placeholder:labels_batch,
self.keep_prob_placeholder:1.0
}
cur_prob= self.sess.run(self.probs,feed_dict=feed_dict)
#Fill labels array
if batch_size > 1:
start_ind = batch_size*i
elif batch_size == 1:
start_ind = num_whole_pulls*eval_batch_size+(i-num_whole_pulls)
else:
print "Never reach this condition"
all_labels[start_ind:start_ind+batch_size,:] = labels_batch
all_probs[start_ind:start_ind+batch_size,:] = cur_prob
#Calculate metrics and save results in a dict
md = self.calc_classifier_metrics(all_labels,all_probs)
md["epoch"]=self.epoch
md["step"]=self.step
#print "Testing accuracy",float(num_correct)/float(batcher.num_records)
print 'Num examples: %d Num correct: %d Accuracy: %0.04f' % \
(batcher.num_records, md["num_correct"], md["accuracy"])+'\n'
if save_plots:
###Plot some metrics
plot_colors = cycle(['cyan','blue','orange','teal'])
#print "Labels shape",all_labels.shape
#print "Probs shape",all_probs.shape
#print "Preds shape",all_preds.shape
#Generate auROC plot axes
fig1,ax1 = plt.subplots(2)
fig1.subplots_adjust(bottom=0.2)
ax1[0].plot([0,1],[0,1],color='navy',lw=2,linestyle='--')
ax1[0].set_xbound(0.0,1.0)
ax1[0].set_ybound(0.0,1.05)
ax1[0].set_xlabel('False Positive Rate')
ax1[0].set_ylabel('True Positive Rate')
ax1[0].set_title('auROC')
#plt.legend(loc='lower right')
ax1[0].plot(md["fpr"],md["tpr"],color=plot_colors.next(),
lw=2,linestyle='-',label='auROC curve (area=%0.2f)' % md["auroc"] )
#Generate auPRC plot axes
#ax1[1].plot([0,1],[1,1],color='royalblue',lw=2,linestyle='--')
ax1[1].set_xlabel('Precision')
ax1[1].set_ylabel('Recall')
ax1[1].set_title('auPRC')
ax1[1].plot(md["thresh_precision"],md["thresh_recall"],color=plot_colors.next(),
lw=2,linestyle='-',label='auPRC curve (area=%0.2f)' % md["auprc"] )
ax1[1].set_xbound(0.0,1.0)
ax1[1].set_ybound(0.0,1.05)
#Note: avg prec score is the area under the prec recall curve
#Note: Presumably class 1 (pos examples) should be the only f1 score we focus on
#print "F1 score for class",i,"is",f1_score
plt.tight_layout()
plt_fname = self.save_dir+os.sep+image_name
print "Saving auROC image to",plt_fname
fig1.savefig(plt_fname)
#Return metrics dictionary
return md
def calc_classifier_metrics(self,all_labels,all_probs):
"""Calculate some metrics for the dataset
return dictionary with metrics
        :param all_labels: nx2 one-hot labels
        :param all_probs: nx2 predicted class probabilities
:returns: dictionary of metrics
:rtype: dict()
"""
num_records = all_probs.shape[0]
all_preds = np.zeros((num_records, self.num_classes),dtype = np.float32)
all_preds[np.arange(num_records),all_probs.argmax(1)] = 1
#Calculate accuracy
num_correct = metrics.accuracy_score(all_labels[:,self.pos_index],all_preds[:,self.pos_index],normalize=False)
accuracy = num_correct/float(all_preds.shape[0])
###Calculate auROC
#http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
#metrics.roc_curve(y_true, y_score[, ...]) #y_score is probs
fpr,tpr,_ = metrics.roc_curve(all_labels[:,self.pos_index],
all_probs[:,self.pos_index],
pos_label=self.pos_index)
auroc = metrics.auc(fpr,tpr)
thresh_precision,thresh_recall,prc_thresholds = metrics.precision_recall_curve(
all_labels[:,self.pos_index],
all_probs[:,self.pos_index])
#Calculate precision, recall, and f1-score for threshold = 0.5
#confusion_matrix = metrics.confusion_matrix(all_labels[:,self.pos_index],all_probs[:,self.pos_index])
precision, recall, f1_score, support = metrics.precision_recall_fscore_support(
all_labels[:,self.pos_index],
all_preds[:,self.pos_index],
pos_label=self.pos_index)
precision = precision[self.pos_index]
recall = recall[self.pos_index]
f1_score = f1_score[self.pos_index]
support = support[self.pos_index]
auprc = metrics.average_precision_score(all_labels[:,self.pos_index],
all_probs[:,self.pos_index])
return OrderedDict([
("num_correct",num_correct),
("accuracy",accuracy),
("auroc",auroc),
("auprc",auprc),
("fpr",fpr),
("tpr",tpr),
("precision",precision),
("recall",recall),
("f1_score",f1_score),
("support",support),
("thresh_precision",thresh_precision),
("thresh_recall",thresh_recall),
("prc_thresholds",prc_thresholds)
])
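# A toy, self-contained sketch (guarded so it only runs when this module is executed
# directly) of how the threshold-free metrics assembled by calc_classifier_metrics map
# onto sklearn calls. The labels and probabilities below are illustrative assumptions,
# not outputs of the classifier above.
if __name__ == "__main__":
    _toy_labels = np.array([0, 0, 1, 1])             # 1 marks the positive class
    _toy_probs = np.array([0.10, 0.40, 0.35, 0.80])  # predicted P(class == 1)
    _fpr, _tpr, _ = metrics.roc_curve(_toy_labels, _toy_probs, pos_label=1)
    print "toy auROC:", metrics.auc(_fpr, _tpr)
    print "toy auPRC:", metrics.average_precision_score(_toy_labels, _toy_probs)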
| gpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/tests/test_common.py | 9 | 2087 | """
Tests for the pandas.io.common functionalities
"""
from pandas.compat import StringIO
import os
from os.path import isabs
import nose
import pandas.util.testing as tm
from pandas.io import common
try:
from pathlib import Path
except ImportError:
pass
try:
from py.path import local as LocalPath
except ImportError:
pass
class TestCommonIOCapabilities(tm.TestCase):
def test_expand_user(self):
filename = '~/sometest'
expanded_name = common._expand_user(filename)
self.assertNotEqual(expanded_name, filename)
self.assertTrue(isabs(expanded_name))
self.assertEqual(os.path.expanduser(filename), expanded_name)
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = common._expand_user(filename)
self.assertEqual(expanded_name, filename)
self.assertEqual(os.path.expanduser(filename), expanded_name)
def test_stringify_path_pathlib(self):
tm._skip_if_no_pathlib()
rel_path = common._stringify_path(Path('.'))
self.assertEqual(rel_path, '.')
redundant_path = common._stringify_path(Path('foo//bar'))
self.assertEqual(redundant_path, os.path.join('foo', 'bar'))
def test_stringify_path_localpath(self):
tm._skip_if_no_localpath()
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
self.assertEqual(common._stringify_path(lpath), abs_path)
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
self.assertNotEqual(filepath_or_buffer, filename)
self.assertTrue(isabs(filepath_or_buffer))
self.assertEqual(os.path.expanduser(filename), filepath_or_buffer)
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer)
self.assertEqual(filepath_or_buffer, input_buffer)
| gpl-2.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/io/tests/test_sql.py | 1 | 93879 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, the different tested flavors (sqlite3, MySQL, PostgreSQL)
derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback` and `TestMySQLLegacy`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
from datetime import datetime, date, time
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
}
}
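# A minimal, self-contained sketch (not one of the tests below) of how the
# parameterized statements above are consumed through read_sql_query's `params`
# argument; it builds a throwaway in-memory sqlite database so it stays runnable.
def _sql_strings_param_demo():
    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE iris (SepalLength REAL, Name TEXT)")
    conn.execute("INSERT INTO iris VALUES (5.1, 'Iris-setosa')")
    # qmark style takes a positional list, named style takes a dict
    by_position = sql.read_sql_query(SQL_STRINGS['read_parameters']['sqlite'],
                                     conn, params=['Iris-setosa', 5.1])
    by_name = sql.read_sql_query(SQL_STRINGS['read_named_parameters']['sqlite'],
                                 conn, params={'name': 'Iris-setosa', 'length': 5.1})
    conn.close()
    return by_position, by_name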
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query('SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A',],])
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res), 0)
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res2), 1)
#------------------------------------------------------------------------------
#--- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode (`TestSQLiteFallbackApi`).
These tests are run with sqlite3. Specific tests for the different
sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5',
self.conn, flavor='sqlite', index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn, flavor='sqlite')
def test_legacy_write_frame(self):
# Assume that functionality is already tested above so just do
# quick check that it basically works
with tm.assert_produces_warning(FutureWarning):
sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn,
flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'),
'Table not written to DB')
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, flavor='sqlite', chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
        # Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a':[1+1j, 2j]})
# Complex data type should raise error
self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn)
self.assertTrue('CREATE' in create_sql)
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a':[1.1,1.2], 'b':[2.1,2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test', 'sqlite',
con=self.conn, dtype={'b':dtype})
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
class TestSQLApi(_TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
        # test index_col argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
def test_warning_case_insensitive_table_name(self):
# see GH7815.
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for writing a table")
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
class TestSQLiteFallbackApi(_TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
        # Test that the IO in the database still works if the connection is closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn,
flavor="sqlite", index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn,
flavor="sqlite", index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite')
self.assertTrue('CREATE' in create_sql)
def test_tquery(self):
with tm.assert_produces_warning(FutureWarning):
iris_results = sql.tquery("SELECT * FROM iris", con=self.conn)
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_uquery(self):
with tm.assert_produces_warning(FutureWarning):
rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn)
self.assertEqual(rows, -1)
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn, self.flavor)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
self.assertEqual(self._get_sqlite_column_type(schema, 'time'),
"TIMESTAMP")
#------------------------------------------------------------------------------
#--- Database flavor specific tests
class _TestSQLAlchemy(PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@classmethod
def setUpClass(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
raise nose.SkipTest(msg)
def setUp(self):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
def tearDown(self):
raise NotImplementedError()
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
pandasSQL.drop_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64':[2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'], coerce=True)
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1,dtype=np.int32)
s2 = Series(0.0,dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.String))
self.assertEqual(sqltype.length, 10)
def test_notnull_dtype(self):
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32':Series([V,], dtype='float32'),
'f64':Series([V,], dtype='float64'),
'f64_as_f32':Series([V,], dtype='float64'),
'i32':Series([5,], dtype='int32'),
'i64':Series([5,], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32':sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
self.assertEqual(np.round(df['f64'].iloc[0],14),
np.round(res['f64'].iloc[0],14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
class TestSQLiteAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def tearDown(self):
# in memory so tables should not be removed explicitly
pass
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so it shouldn't parse
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_bigint_warning(self):
        # test that no warning is raised for BIGINT (used to support int64) (GH7433)
df = DataFrame({'a':[1,2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
self.assertEqual(len(w), 0, "Warning triggered for other table")
class TestMySQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = 'pymysql'
except ImportError:
raise nose.SkipTest('pymysql not installed')
def tearDown(self):
c = self.conn.execute('SHOW TABLES')
for table in c.fetchall():
self.conn.execute('DROP TABLE %s' % table[0])
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# MySQL has no real BOOL type (it's an alias for TINYINT)
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA = int column with NA values => becomes float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_read_procedure(self):
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b':[0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc)
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class TestPostgreSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2
cls.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest('psycopg2 not installed')
def tearDown(self):
c = self.conn.execute(
"SELECT table_name FROM information_schema.tables"
" WHERE table_schema = 'public'")
for table in c.fetchall():
self.conn.execute("DROP TABLE %s" % table[0])
def test_schema_support(self):
# only test this for postgresql (schema's not supported in mysql/sqlite)
df = DataFrame({'col1':[1, 2], 'col2':[0.1, 0.2], 'col3':['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
## different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table('test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
## specifying schema in user-provided meta
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='append')
res1 = sql.read_sql_table('test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
def test_datetime_with_time_zone(self):
# Test to see if we read the date column with timezones that
# the timezone information is converted to utc and into a
# np.datetime64 (GH #7139)
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64),
"DateColWithTz loaded with incorrect type")
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(df.DateColWithTz[1], Timestamp('2000-06-01 07:00:00'))
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
class TestSQLiteFallback(PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.SQLiteDatabase, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
'Table not written to DB')
self.pandasSQL.drop_table('drop_test_frame')
self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False, flavor=self.flavor)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
# test that an error is raised rather than failing silently (GH8341)
if self.flavor == 'sqlite':
self.assertRaises(sqlite3.InterfaceError, sql.to_sql, df,
'test_time', self.conn)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
self.assertEqual(self._get_sqlite_column_type('dtype_test', 'B'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type('dtype_test2', 'B'), 'STRING')
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
def test_notnull_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Date'), 'TIMESTAMP')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
self.assertRaises(ValueError, df.to_sql, "", self.conn,
flavor=self.flavor)
for ndx, weird_name in enumerate(['test_weird_name]','test_weird_name[',
'test_weird_name`','test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'12345','12345blah']):
df.to_sql(weird_name, self.conn, flavor=self.flavor)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor)
sql.table_exists(c_tbl, self.conn)
class TestMySQLLegacy(TestSQLiteFallback):
"""
Test the legacy mode against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def setUpClass(cls):
cls.setup_driver()
# test connection
try:
cls.connect()
except cls.driver.err.OperationalError:
raise nose.SkipTest("{0} - can't connect to MySQL server".format(cls))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed')
@classmethod
def connect(cls):
return cls.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest')
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def _count_rows(self, table_name):
cur = self._get_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchall()
return rows[0][0]
def setUp(self):
try:
self.conn = self.connect()
except self.driver.err.OperationalError:
raise nose.SkipTest("Can't connect to MySQL server")
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def tearDown(self):
c = self.conn.cursor()
c.execute('SHOW TABLES')
for table in c.fetchall():
c.execute('DROP TABLE %s' % table[0])
self.conn.commit()
self.conn.close()
def test_a_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn,
flavor='mysql')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='mysql'),
'Table not written to DB')
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SHOW INDEX IN %s" % tbl_name, self.conn)
ix_cols = {}
for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
if ix_name not in ix_cols:
ix_cols[ix_name] = []
ix_cols[ix_name].append(ix_col)
return list(ix_cols.values())
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_illegal_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# These tables and columns should be ok
for ndx, ok_name in enumerate(['99beginswithnumber','12345']):
df.to_sql(ok_name, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
self.conn.cursor().execute("DROP TABLE `%s`" % ok_name)
self.conn.commit()
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', ok_name])
c_tbl = 'test_ok_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
self.conn.cursor().execute("DROP TABLE `%s`" % c_tbl)
self.conn.commit()
# For MySQL, these should raise ValueError
for ndx, illegal_name in enumerate(['test_illegal_name]','test_illegal_name[',
'test_illegal_name`','test_illegal_name"', 'test_illegal_name\'', '']):
self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn,
flavor=self.flavor, index=False)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', illegal_name])
c_tbl = 'test_illegal_col_name%d'%ndx
self.assertRaises(ValueError, df2.to_sql, c_tbl,
self.conn, flavor=self.flavor, index=False)
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
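# Example of the formatting above (illustrative, using the _formatters table): floats are
# rendered with "%.8f" and None becomes NULL, so
#   format_query("INSERT INTO test VALUES (%s, %s)", 1.5, None)
# produces "INSERT INTO test VALUES (1.50000000, NULL)".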
def _skip_if_no_pymysql():
try:
import pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(tm.TestCase):
def setUp(self):
self.db = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.db.cursor()
cur.execute(create_sql)
cur = self.db.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.db.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY ("A","B")' in create_sql)
cur = self.db.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.db.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.db)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.db.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
self.db.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.db)
finally:
sys.stdout = sys.__stdout__
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.read_frame("select * from test_table", self.db)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.write_frame(frame2, name='test_table2', con=self.db)
result = sql.read_frame("select * from test_table2", self.db,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_tquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.tquery("select A from test_table", self.db)
expected = Series(frame.A.values, frame.index) # expected deliberately has no name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.db), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
'''
Test that a reserved SQL keyword ('From') can be used as a column name.
'''
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords')
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df=DataFrame([1 , 2], columns=['c0'])
sql.write_frame(mono_df, con = self.db, name = 'mono_df')
# computing the sum via sql
con_x=self.db
the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
# it should not fail, and gives 3 ( Issue #3628 )
self.assertEqual(the_sum , 3)
result = sql.read_frame("select * from mono_df",con_x)
tm.assert_frame_equal(result,mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
if sql.table_exists(test_table_to_drop, self.db, flavor='sqlite'):
cur = self.db.cursor()
cur.execute("DROP TABLE %s" % test_table_to_drop)
cur.close()
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='sqlite',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='sqlite',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='sqlite', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
class TestXMySQL(tm.TestCase):
@classmethod
def setUpClass(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setUp(self):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.db = pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
self.db = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def tearDown(self):
from pymysql.err import Error
try:
self.db.close()
except Error:
pass
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.ix[0].values.tolist()
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'mysql')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY (`A`,`B`)' in create_sql)
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.db)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
self.db.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.db)
finally:
sys.stdout = sys.__stdout__
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
result = sql.read_frame("select * from test_table", self.db)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql')
result = sql.read_frame("select * from test_table2", self.db,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_tquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
result = sql.tquery("select A from test_table", self.db)
expected = Series(frame.A.values, frame.index) # expected deliberately has no name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.db), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
'''
Test that a reserved SQL keyword ('From') can be used as a column name (MySQL flavor).
'''
_skip_if_no_pymysql()
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords',
if_exists='replace', flavor='mysql')
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
if sql.table_exists(test_table_to_drop, self.db, flavor='mysql'):
cur = self.db.cursor()
cur.execute("DROP TABLE %s" % test_table_to_drop)
cur.close()
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='mysql',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='mysql',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='mysql', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
Pragmatismo/TimelapsePi-EasyControl | webcamcap_show_numpy.py | 1 | 8684 | #!/usr/bin/python
import time
import os
import sys
import pygame
import numpy
from PIL import Image, ImageDraw, ImageChops
print("")
print("")
print(" USE l=3 to take a photo every 3 somethings, try a 1000 or 2")
print(" t to take triggered photos ")
print(" cap=/home/pi/folder/ to set caps path other than current dir")
print(" ")
pi_paper = False # updates pi wallpaper; pass 'wp' or 'wallpaper' on the command line to turn it on.
s_val = "10"
c_val = "2"
g_val = "10"
b_val = "15"
x_dim = 1600
y_dim = 896
additonal_commands = "-d/dev/video1 -w"
try:
cappath = os.getcwd()
cappath += "/"
except:
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
cappath = "./"
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
loc_settings = "./camera_settings.txt"
try:
with open(loc_settings, "r") as f:
for line in f:
s_item = line.split("=")
if s_item[0] == "s_val":
s_val = s_item[1].split("\n")[0]
elif s_item[0] == "c_val":
c_val = s_item[1].split("\n")[0]
elif s_item[0] == "g_val":
g_val = s_item[1].split("\n")[0]
elif s_item[0] == "b_val":
b_val = s_item[1].split("\n")[0]
elif s_item[0] == "x_dim":
x_dim = s_item[1].split("\n")[0]
elif s_item[0] == "y_dim":
y_dim = s_item[1].split("\n")[0]
elif s_item[0] == "additonal_commands":
additonal_commands = s_item[1].split("\n")[0]
except:
print("No config file for camera, using default")
print("Run cam_config.py to create one")
def photo():
# take and save photo
timenow = time.time()
timenow = str(timenow)[0:10]
filename= "cap_"+str(timenow)+".jpg"
#os.system("uvccapture "+additonal_commands+" -S"+s_val+" -C" + c_val + " -G"+ g_val +" -B"+ b_val +" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
cmd = str("uvccapture "+additonal_commands+" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
print("####")
print("####")
print(cmd)
print("####")
print("####")
os.system(cmd)
print("Image taken and saved to "+cappath+filename)
if pi_paper == True:
os.system("export DISPLAY=:0 && pcmanfm --set-wallpaper "+cappath+filename)
return filename
if 'wp' in sys.argv or 'wallpaper' in sys.argv:
pi_paper = True
print(" Going to try changing wall paper")
loop = False
trig = False
for argu in sys.argv[1:]:
try:
thearg = str(argu).split('=')[0]
except:
thearg = str(argu)
if thearg == 'cap' or thearg =='cappath':
cappath = str(argu).split('=')[1]
elif thearg == 'l' or thearg == 'looped':
try:
num = int(str(argu).split('=')[1])
except:
print("No speed supplied, taking every 10")
num = 10
loop = True
elif thearg == 't' or thearg == 'TRIGGERED':
trig = True
print(" Saving files to, " + str(cappath))
pygame.init()
display_width = x_dim
display_height = y_dim
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Most recent image')
black = (0,0,0)
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
import matplotlib.pyplot as plt
def show_pic(imgtaken, x=0,y=0):
gameDisplay.blit(imgtaken, (x,y))
gameDisplay.fill(white)
c_photo = photo()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
b_photo = photo()
pil_b_photo = Image.open(b_photo)
numpy_pic_b = numpy.array(pil_b_photo)
mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
mask2 = numpy_pic_b < numpy_pic - 30
lol = mask + mask2
e_pic = numpy_pic.copy()
num = 0
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
timenow = time.time()
e_photo = str(timenow).split(".")[0]
e_photo= "numpy_"+str(timenow)+".jpg"
num = num + 1
b_photo = c_photo
c_photo = photo()
numpy_pic_b = numpy_pic.copy()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
print(numpy_pic.size)
#print len(numpy_pic[3])
print("###")
#print numpy_pic[1:,1,1]
#a = np.arange(100)
print("##########")
#numpy_pic[1:500, range(0, len(numpy_pic[2]), 10), 1] = 0
#for x in numpy_pic[1:500, range(0, len(numpy_pic[2])), 1]:
# if x >= 100:
# x = 255
#for x in range(10,170,10):
# mask = numpy_pic < x
# numpy_pic[mask] = 255-x #numpy_pic[mask] + numpy_pic[mask]
#for x in range(200,255,5):
# mask = numpy_pic > x
# numpy_pic[mask] = 0+(x/10) # numpy_pic[mask] / numpy_pic[mask]+(numpy_pic[mask]/numpy_pic[mask])
#print numpy_pic[1:,1,1]
#print numpy_pic.min()
print "###"
#print numpy_pic.shape #Array dimensions
#print numpy_pic.ndim #Number of array dimensions
#print numpy_pic.dtype #Data type of array elements
#print numpy_pic.dtype.name #Name of data type
#print numpy_pic.mean()
#print numpy_pic.max()
#print numpy_pic.min()
#print numpy.info(numpy.ndarray.dtype)
#print numpy_pic.astype(int)
#mask = numpy_pic > numpy_pic_b
#mask = numpy_pic[:, :, 2] > 150
#numpy_pic[mask] = [0, 0, 255]
#lol = numpy_pic +
#mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
#mask2 = numpy_pic_b < numpy_pic - 30
margin = 20
maskr = numpy_pic[:, :, 0] < numpy_pic_b[:, :, 0] - margin
maskg = numpy_pic[:, :, 1] < numpy_pic_b[:, :, 1] - margin
maskb = numpy_pic[:, :, 2] < numpy_pic_b[:, :, 2] - margin
maskr2 = numpy_pic[:, :, 0] > numpy_pic_b[:, :, 0] + margin
maskg2 = numpy_pic[:, :, 1] > numpy_pic_b[:, :, 1] + margin
maskb2 = numpy_pic[:, :, 2] > numpy_pic_b[:, :, 2] + margin
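# e.g. with margin = 20, a pixel whose red channel drops from 120 to 90 satisfies
# maskr (90 < 120 - 20), while a rise from 120 to 150 satisfies maskr2 (150 > 120 + 20).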
#numpy_pic[mask] = [0, 0, 255]
#lol_old = lol
#lol = mask + mask2
#lol = lol + lol_old
persist = 'ohhh'
if persist == 'True':
numpy_pic[maskr] = [255, 0, 0]
numpy_pic[maskg] = [0, 255, 0]
numpy_pic[maskb] = [0, 0, 255]
numpy_pic[maskb2] = [0, 0, 100]
numpy_pic[maskr2] = [100, 0, 0]
numpy_pic[maskg2] = [0, 100, 0]
Image.fromarray(numpy_pic).save(e_photo)
elif persist == 'False':
old_e = e_pic
e_pic = numpy_pic.copy()
e_pic[maskr] = [255, 0, 0]
e_pic[maskg] = [0, 255, 0]
e_pic[maskb] = [0, 0, 255]
e_pic[maskr2] = [100, 0, 0]
e_pic[maskg2] = [0, 100, 0]
e_pic[maskb2] = [0, 0, 100]
show1 = 'waa'
if show1 == '1':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic / 3 + old_e / 2
elif show1 == 'tripsy':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic - old_e / 2
elif show1 == 'waa':
e_pic = ((e_pic/4) - (numpy_pic))*3
#e_pic = old_e * 0.8 + e_pic * 0.2
Image.fromarray(e_pic).save(e_photo)
elif persist == 'ohhh':
old_e = e_pic.copy()
mask_b_pic = numpy_pic.copy()
mask_d_pic = numpy_pic.copy()
mask_b_pic[maskr] = [255, 255, 255]
mask_b_pic[maskg] = [255, 255, 255]
mask_b_pic[maskb] = [255, 255, 255]
mask_d_pic[maskr2] = [0, 0, 0]
mask_d_pic[maskg2] = [0, 0, 0]
mask_d_pic[maskb2] = [0, 0, 0]
#e_pic = e_pic/6 + old_e
e_pic = [200, 200, 0]
#e_pic = e_pic/2 - ((mask_d_pic) + (mask_b_pic))
#e_pic = e_pic/2 + ((mask_d_pic) + (mask_b_pic))
#choose one of the following
#e_pic = mask_d_pic #shows when pixel is darker than it was
#e_pic = mask_b_pic #shows when pixel is lighter than prior
e_pic = mask_d_pic - mask_b_pic #black except for movement
e_pic = mask_b_pic / (mask_d_pic / 100) #black except for movement (this last assignment wins)
#e_pic = mask_d_pic + mask_b_pic #looks odd
Image.fromarray(e_pic).save(e_photo)
#plt.imshow(lol)
#plt.show()
#Image.fromarray(numpy_pic).save(e_photo)
onscreen = pygame.image.load(e_photo)
gameDisplay.blit(onscreen, (0,0))
pygame.display.update()
if trig == True:
print("Waiting for input before taking next image...")
tp = raw_input("press return to take picture; ")
if tp == "q":
print("---bye!")
exit()
clock.tick(20)
if loop == True:
pygame.time.wait(num)
clock.tick(20)
elif trig == False and loop == False:
crashed = True
#while True:
#pygame.time.wait(1000)
#clock.tick(20)
pygame.quit()
quit()
| gpl-2.0 |
SMTorg/smt | smt/surrogate_models/tests/test_surrogate_model_examples.py | 2 | 17391 | """
Author: John Hwang <<hwangjt@umich.edu>>
This package is distributed under New BSD license.
"""
import unittest
import matplotlib
matplotlib.use("Agg")
try:
from smt.surrogate_models import IDW, RBF, RMTB, RMTC
compiled_available = True
except:
compiled_available = False
class Test(unittest.TestCase):
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_idw(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import IDW
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = IDW(p=2)
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rbf(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import RBF
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = RBF(d0=5)
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtb(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import RMTB
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
xlimits = np.array([[0.0, 4.0]])
sm = RMTB(
xlimits=xlimits,
order=4,
num_ctrl_pts=20,
energy_weight=1e-15,
regularization_weight=0.0,
)
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtc(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import RMTC
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
xlimits = np.array([[0.0, 4.0]])
sm = RMTC(
xlimits=xlimits,
num_elements=20,
energy_weight=1e-15,
regularization_weight=0.0,
)
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
def test_ls(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import LS
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = LS()
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
def test_qp(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import QP
xt = np.array([[0.0, 1.0, 2.0, 3.0, 4.0]]).T
yt = np.array([[0.2, 1.4, 1.5, 0.9, 1.0], [0.0, 1.0, 2.0, 4, 3]]).T
sm = QP()
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
t1, _ = plt.plot(xt, yt[:, 0], "o", "C0")
p1 = plt.plot(x, y[:, 0], "C0", label="Prediction 1")
t2, _ = plt.plot(xt, yt[:, 1], "o", "C1")
p2 = plt.plot(x, y[:, 1], "C1", label="Prediction 2")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()
def test_krg(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = KRG(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
# estimated variance
s2 = sm.predict_variances(x)
# derivative according to the first variable
dydx = sm.predict_derivatives(xt, 0)
fig, axs = plt.subplots(1)
# add a plot with variance
axs.plot(xt, yt, "o")
axs.plot(x, y)
axs.fill_between(
np.ravel(x),
np.ravel(y - 3 * np.sqrt(s2)),
np.ravel(y + 3 * np.sqrt(s2)),
color="lightgrey",
)
axs.set_xlabel("x")
axs.set_ylabel("y")
axs.legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="lower right",
)
plt.show()
def test_mixed_int_krg(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
from smt.applications.mixed_integer import MixedIntegerSurrogateModel, INT
xt = np.array([0.0, 2.0, 3.0])
yt = np.array([0.0, 1.5, 0.9])
# xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
# FLOAT means x1 continuous
# INT means x2 integer
# (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
# (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable
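# Taken together, xtypes=[FLOAT, INT, (ENUM, 3), (ENUM, 2)] would therefore describe a
# four-variable design space encoded over the seven columns x1..x7 listed above.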
sm = MixedIntegerSurrogateModel(
xtypes=[INT], xlimits=[[0, 4]], surrogate=KRG(theta0=[1e-2])
)
sm.set_training_values(xt, yt)
sm.train()
num = 500
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
# estimated variance
s2 = sm.predict_variances(x)
fig, axs = plt.subplots(1)
axs.plot(xt, yt, "o")
axs.plot(x, y)
axs.fill_between(
np.ravel(x),
np.ravel(y - 3 * np.sqrt(s2)),
np.ravel(y + 3 * np.sqrt(s2)),
color="lightgrey",
)
axs.set_xlabel("x")
axs.set_ylabel("y")
axs.legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="lower right",
)
plt.show()
def test_mixed_gower_krg(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG
from smt.applications.mixed_integer import MixedIntegerSurrogateModel
from smt.applications.mixed_integer import ENUM
# xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
# FLOAT means x1 continuous
# INT means x2 integer
# (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
# (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable
xt = np.linspace(1.0, 5.0, 5)
x_train = np.array(["%.2f" % i for i in xt], dtype=object)
yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])
xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"]]
sm = MixedIntegerSurrogateModel(
use_gower_distance=True,
xtypes=[(ENUM, 5)],
xlimits=xlimits,
surrogate=KRG(theta0=[1e-2]),
)
sm.set_training_values(x_train, yt)
sm.train()
num = 101
x = np.linspace(0, 5, num)
x_pred = np.array(["%.2f" % i for i in x], dtype=object)
y = sm.predict_values(x_pred)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
def test_kpls(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KPLS
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = KPLS(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
# estimated variance
s2 = sm.predict_variances(x)
# to compute the derivative according to the first variable
dydx = sm.predict_derivatives(xt, 0)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
# add a plot with variance
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.fill_between(
np.ravel(x),
np.ravel(y - 3 * np.sqrt(s2)),
np.ravel(y + 3 * np.sqrt(s2)),
color="lightgrey",
)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction", "Confidence Interval 99%"])
plt.show()
def test_kplsk(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KPLSK
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
sm = KPLSK(theta0=[1e-2])
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
# estimated variance
s2 = sm.predict_variances(x)
# derivative according to the first variable
dydx = sm.predict_derivatives(xt, 0)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
# add a plot with variance
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.fill_between(
np.ravel(x),
np.ravel(y - 3 * np.sqrt(s2)),
np.ravel(y + 3 * np.sqrt(s2)),
color="lightgrey",
)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction", "Confidence Interval 99%"])
plt.show()
def test_gekpls(self):
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from smt.surrogate_models import GEKPLS
from smt.problems import Sphere
from smt.sampling_methods import LHS
# Construction of the DOE
fun = Sphere(ndim=2)
sampling = LHS(xlimits=fun.xlimits, criterion="m")
xt = sampling(20)
yt = fun(xt)
# Compute the gradient
for i in range(2):
yd = fun(xt, kx=i)
yt = np.concatenate((yt, yd), axis=1)
# Build the GEKPLS model
sm = GEKPLS(
theta0=[1e-2], xlimits=fun.xlimits, extra_points=1, print_prediction=False
)
sm.set_training_values(xt, yt[:, 0])
for i in range(2):
sm.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
sm.train()
# Test the model
X = np.arange(fun.xlimits[0, 0], fun.xlimits[0, 1], 0.25)
Y = np.arange(fun.xlimits[1, 0], fun.xlimits[1, 1], 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.zeros((X.shape[0], X.shape[1]))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
Z[i, j] = sm.predict_values(
np.hstack((X[i, j], Y[i, j])).reshape((1, 2))
)
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z)
plt.show()
def test_genn(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models.genn import GENN, load_smt_data
# Training data
lower_bound = -np.pi
upper_bound = np.pi
number_of_training_points = 4
xt = np.linspace(lower_bound, upper_bound, number_of_training_points)
yt = xt * np.sin(xt)
dyt_dxt = np.sin(xt) + xt * np.cos(xt)
# Validation data
number_of_validation_points = 30
xv = np.linspace(lower_bound, upper_bound, number_of_validation_points)
yv = xv * np.sin(xv)
dyv_dxv = np.sin(xv) + xv * np.cos(xv)
# Truth model
x = np.arange(lower_bound, upper_bound, 0.01)
y = x * np.sin(x)
# GENN
genn = GENN()
genn.options["alpha"] = 0.1 # learning rate that controls optimizer step size
genn.options["beta1"] = 0.9 # tuning parameter to control ADAM optimization
genn.options["beta2"] = 0.99 # tuning parameter to control ADAM optimization
genn.options[
"lambd"
] = 0.1 # lambd = 0. = no regularization, lambd > 0 = regularization
genn.options[
"gamma"
] = 1.0 # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
genn.options["deep"] = 2 # number of hidden layers
genn.options["wide"] = 6 # number of nodes per hidden layer
genn.options[
"mini_batch_size"
] = 64 # used to divide data into training batches (use for large data sets)
genn.options["num_epochs"] = 20 # number of passes through data
genn.options[
"num_iterations"
] = 100 # number of optimizer iterations per mini-batch
genn.options["is_print"] = True # print output (or not)
load_smt_data(
genn, xt, yt, dyt_dxt
) # convenience function to read in data that is in SMT format
genn.train() # API function to train model
genn.plot_training_history() # non-API function to plot training history (to check convergence)
genn.goodness_of_fit(
xv, yv, dyv_dxv
) # non-API function to check accuracy of regression
y_pred = genn.predict_values(
x
) # API function to predict values at new (unseen) points
# Plot
fig, ax = plt.subplots()
ax.plot(x, y_pred)
ax.plot(x, y, "k--")
ax.plot(xv, yv, "ro")
ax.plot(xt, yt, "k+", mew=3, ms=10)
ax.set(xlabel="x", ylabel="y", title="GENN")
ax.legend(["Predicted", "True", "Test", "Train"])
plt.show()
def test_mgp(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import MGP
from smt.sampling_methods import LHS
# Construction of the DOE
dim = 3
def fun(x):
import numpy as np
res = (
np.sum(x, axis=1) ** 2
- np.sum(x, axis=1)
+ 0.2 * (np.sum(x, axis=1) * 1.2) ** 3
)
return res
sampling = LHS(xlimits=np.asarray([(-1, 1)] * dim), criterion="m")
xt = sampling(8)
yt = np.atleast_2d(fun(xt)).T
# Build the MGP model
sm = MGP(
theta0=[1e-2],
print_prediction=False,
n_comp=1,
)
sm.set_training_values(xt, yt[:, 0])
sm.train()
# Get the transfer matrix A
emb = sm.embedding["C"]
# Compute the smallest box containing all points of A
upper = np.sum(np.abs(emb), axis=0)
lower = -upper
# Test the model
u_plot = np.atleast_2d(np.arange(lower, upper, 0.01)).T
x_plot = sm.get_x_from_u(u_plot) # Get corresponding points in Omega
y_plot_true = fun(x_plot)
y_plot_pred = sm.predict_values(u_plot)
sigma_MGP, sigma_KRG = sm.predict_variances(u_plot, True)
u_train = sm.get_u_from_x(xt) # Get corresponding points in A
# Plots
fig, ax = plt.subplots()
ax.plot(u_plot, y_plot_pred, label="Predicted")
ax.plot(u_plot, y_plot_true, "k--", label="True")
ax.plot(u_train, yt, "k+", mew=3, ms=10, label="Train")
ax.fill_between(
u_plot[:, 0],
y_plot_pred - 3 * sigma_MGP,
y_plot_pred + 3 * sigma_MGP,
color="r",
alpha=0.5,
label="Variance with hyperparameters uncertainty",
)
ax.fill_between(
u_plot[:, 0],
y_plot_pred - 3 * sigma_KRG,
y_plot_pred + 3 * sigma_KRG,
color="b",
alpha=0.5,
label="Variance without hyperparameters uncertainty",
)
ax.set(xlabel="x", ylabel="y", title="MGP")
fig.legend(loc="upper center", ncol=2)
fig.tight_layout()
fig.subplots_adjust(top=0.74)
plt.show()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
hainm/scipy | scipy/stats/_discrete_distns.py | 34 | 21220 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
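# Worked example of the pmf above: for n=4, p=0.5, choose(4, 2)*0.5**2*0.5**2 = 6*0.0625
# = 0.375, so binom.pmf(2, 4, 0.5) evaluates to 0.375.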
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
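# Worked example: bernoulli.pmf(1, 0.3) = 0.3 and bernoulli.pmf(0, 0.3) = 0.7,
# matching the piecewise definition in the docstring.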
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
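# Worked example of the pmf above: for n=5, p=0.5, k=0, choose(4, 4)*0.5**5*(1-0.5)**0
# = 0.03125, so nbinom.pmf(0, 5, 0.5) evaluates to 0.03125.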
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
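# Worked example of the pmf above: geom.pmf(3, 0.5) = (1 - 0.5)**2 * 0.5 = 0.125.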
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
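# Illustrative cross-check of the log-gamma based pmf above against the plain
# combinatorial form (arbitrary values; assumes scipy.special.comb is available):
# >>> from scipy.stats import hypergeom
# >>> from scipy.special import comb
# >>> M, n, N, k = 20, 7, 12, 3
# >>> expected = comb(n, k) * comb(M - n, N - k) / comb(M, N)
# >>> bool(abs(hypergeom.pmf(k, M, n, N) - expected) < 1e-12)
# True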
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
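# Illustrative check of the closed-form pmf used above (arbitrary k and p):
# >>> import numpy as np
# >>> from scipy.stats import logser
# >>> k, p = 3, 0.6
# >>> bool(abs(logser.pmf(k, p) - (-p**k / (k * np.log(1 - p)))) < 1e-12)
# True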
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
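# Illustrative check that the log-space pmf above matches the textbook form
# exp(-mu) * mu**k / k! (arbitrary values; uses only the standard-library factorial):
# >>> import numpy as np
# >>> from math import factorial
# >>> from scipy.stats import poisson
# >>> k, mu = 4, 2.5
# >>> bool(abs(poisson.pmf(k, mu) - np.exp(-mu) * mu**k / factorial(k)) < 1e-12)
# True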
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
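# For orientation: on its positive branch the Planck pmf above is a geometric
# distribution shifted to start at k = 0, with success probability
# p = 1 - exp(-lambda_). A hedged, illustrative check (arbitrary values):
# >>> import numpy as np
# >>> from scipy.stats import planck, geom
# >>> lam, k = 0.7, 3
# >>> bool(abs(planck.pmf(k, lam) - geom.pmf(k + 1, 1 - np.exp(-lam))) < 1e-12)
# True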
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
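# Illustrative check that the truncated pmf above is properly normalized over
# its support k = 0, ..., N-1 (arbitrary lambda_ and N):
# >>> import numpy as np
# >>> from scipy.stats import boltzmann
# >>> lam, N = 0.5, 6
# >>> bool(abs(boltzmann.pmf(np.arange(N), lam, N).sum() - 1.0) < 1e-12)
# True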
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference from numpy's ``random_integers``, which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
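# Illustrative check of the half-open support [low, high) documented above
# (arbitrary bounds): the pmf is 1/(high - low) inside the support and 0 at k = high.
# >>> from scipy.stats import randint
# >>> low, high = 2, 7
# >>> float(randint.pmf(low, low, high)), float(randint.pmf(high, low, high))
# (0.2, 0.0)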
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
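# Illustrative check of the pmf above against the normalizing zeta constant
# (arbitrary shape a and k):
# >>> from scipy.stats import zipf
# >>> from scipy.special import zeta
# >>> a, k = 3.0, 4
# >>> bool(abs(zipf.pmf(k, a) - 1.0 / (zeta(a, 1) * k**a)) < 1e-12)
# True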
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
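# The pmf above depends on k only through abs(k), so the distribution is
# symmetric about 0; a quick illustrative check (arbitrary values):
# >>> from scipy.stats import dlaplace
# >>> a, k = 0.8, 5
# >>> bool(abs(dlaplace.pmf(k, a) - dlaplace.pmf(-k, a)) < 1e-15)
# True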
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
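# Illustrative check of the first two moments computed above: the mean is
# mu1 - mu2 and the variance is mu1 + mu2 (arbitrary parameter values):
# >>> from scipy.stats import skellam
# >>> mean, var = skellam.stats(3.0, 1.5)
# >>> float(mean), float(var)
# (1.5, 4.5)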
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
chinageology/GeoPython | Experimental/PreTreat.py | 2 | 6940 | # coding:utf-8
import math
import sys
import os
import csv
import random
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors
import matplotlib
import scipy.stats as st
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from matplotlib import ft2font
from matplotlib.font_manager import ttfFontProperty
import matplotlib.font_manager as font_manager
import matplotlib.image as mpimg
from pandas.plotting import radviz
from sklearn.neighbors import KNeighborsRegressor
def Del(prepath='/Volumes/Virtual/FastTmp/', path='raw', name='test.txt', head=3, end=-2):
lines = open(path + '/' + name, 'r', encoding='windows-1252').readlines()
open(prepath + 'new' + path + '/' + 'new' + name, 'w', encoding='utf-8').writelines(lines[head:end])
def OldJoin(prepath='/Volumes/Virtual/FastTmp/', path='Raw'):
SourceList = os.listdir(path)
for i in SourceList:
if 'csv' in i and 'new' not in i and i[0] != '.':
Del(prepath=prepath, path=path, name=i, head=3, end=-2)
TargetList = []
for i in SourceList:
if 'csv' in i:
df = pd.read_csv(prepath + 'new' + path + '/' + 'new' + i)
TargetList.append(df)
result = pd.concat(TargetList)
result.reindex()
result.to_csv(prepath + 'result.csv', sep=',', encoding='utf-8')
return result
def Join(prepath='/Volumes/Virtual/FastTmp/', path='Excel', name='result'):
SourceList = os.listdir(prepath + path)
TargetList = []
for i in SourceList:
if 'csv' in i and '~' not in i and i[0] != '.':
print(prepath + path + '/' + i)
try:
df = pd.read_csv(prepath + path + '/' + i)
except Exception:
pass
elif 'xls' in i and '~' not in i and i[0] != '.':
try:
df = pd.read_excel(prepath + path + '/' + i)
except Exception:
pass
TargetList.append(df)
result = pd.concat(TargetList)
result.reindex()
result.to_excel(prepath + name + '.xlsx', encoding='utf-8')
return result
def CsvToExcel(name='result'):
if 'csv' in name and '~' not in name and name[0] != '.':
df = pd.read_csv(name)
df.to_excel('new' + name[0:-4] + '.xlsx', encoding='utf-8')
pass
def ExcelToCsv(name='result'):
if 'xls' in name and '~' not in name and name[0] != '.':
df = pd.read_excel(name)
df.to_csv('new' + name[0:-5] + '.csv', sep=',', encoding='utf-8')
pass
prepath = '/Volumes/Virtual/FastTmp/'
path = 'Target'
df = pd.read_excel(prepath + 'XiaYing-SiO2-FeMg.xlsx')
# m = ['Width', 'Style', 'Alpha', 'Size', 'Color', 'Marker', 'Author']
# for i in m:
# df = df.drop(i, 1)
df.set_index('Label', inplace=True)
newdf = pd.concat([df.SiO2, df.Ratio], axis=1)
numpyMatrix = df.as_matrix()
# X = numpyMatrix[:, :3] # we only take the first two features.
# y = df.index
X = numpyMatrix
y = df.index
color = []
for i in range(len(y)):
if i == 0:
color.append(1)
else:
if y[i] == y[0]:
color.append(1)
else:
color.append(2)
# x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
# y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
X_reduced = PCA(n_components=5).fit_transform(numpyMatrix)
df = pd.read_excel(prepath + 'XiaYing-SiO2-FeMg.xlsx')
df.set_index('Label', inplace=True)
x = df.SiO2
y = df.Ratio
xtouse = x.values
ytouse = y.values
XtoFit=[]
YtoFit=[]
for i in range(len(x.values)):
if x.values[i] < 60:
XtoFit.append(x.values[i])
YtoFit.append(y.values[i])
z = np.polyfit(YtoFit, XtoFit, 3)
Yline = np.linspace(min(YtoFit), max(YtoFit), 30)
p = np.poly1d(z)
Xline = p(Yline)
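# Note on the fit above: np.polyfit(YtoFit, XtoFit, 3) deliberately swaps the
# axes, returning coefficients of a cubic x = p(y); np.poly1d wraps them into a
# callable so the trend can be sampled on a dense y-grid. A minimal, hedged
# sketch of the same pattern with made-up data:
# >>> import numpy as np
# >>> yy = np.linspace(0.0, 1.0, 20)
# >>> xx = 50 + 5 * yy**3
# >>> trend = np.poly1d(np.polyfit(yy, xx, 3))
# >>> bool(abs(trend(0.5) - (50 + 5 * 0.5**3)) < 1e-8)
# True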
newXline = []
#####################################
xmin,xmax = min(x),max(x)
ymin,ymax = min(y),max(y)
# Perform the kernel density estimate
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = plt.figure()
ax = fig.gca()
# Contourf plot
cfset = ax.contourf(xx, yy, f, cmap='Blues',alpha=0.3)
## Or kernel density estimate plot instead of the contourf plot
#ax.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
# Contour plot
cset = ax.contour(xx, yy, f, colors='k',alpha=0.3)
# Label plot
ax.clabel(cset, inline=1, fontsize=10)
#####################################
plt.plot(Xline, Yline, 'b-')
alphatouse = []
leveldistance=[]
for i in range(len(xtouse)):
tmp = abs(p(ytouse[i]) - xtouse[i])
leveldistance.append(tmp)
alphatouse.append(tmp)
a = []
group= 100
step= abs(min(alphatouse)-max(alphatouse))/group
for i in alphatouse:
if min(alphatouse)<=i<min(alphatouse)+step:
a.append(0.8)
elif min(alphatouse)+step<=i<min(alphatouse)+2*step:
a.append(0.6)
elif min(alphatouse)+2*step<=i<min(alphatouse)+3*step:
a.append(0.4)
else:
a.append(0.2)
#plt.scatter(x, y, label='', s=3, color='red', alpha=a)
for i in range(len(xtouse)):
plt.scatter(xtouse[i], ytouse[i], label='', s=3, color='red', alpha=a[i])
pass
#fig = plt.figure(1, figsize=(8, 6))
# ax = Axes3D(fig, elev=-150, azim=110)
# plt.scatter(X_reduced[:, 1], X_reduced[:, 2], c=color, cmap=plt.cm.Set1, edgecolor='k', s=40)
# plt.scatter(x, y, label='', s=3, color='blue', alpha=0.3)
# z= np.polyfit(x, y, 2)
# ax.set_title("First three PCA directions")
# ax.set_xlabel("SiO2")
# ax.w_xaxis.set_ticklabels([])
# ax.set_ylabel("TFeO")
# ax.w_yaxis.set_ticklabels([])
# ax.set_zlabel("MgO")
# ax.w_zaxis.set_ticklabels([])
plt.show()
tm=Join(path='塔木兰沟组',name='新塔木兰沟')
'''
SourceList = os.listdir(prepath + path)
TargetList = []
for i in SourceList:
ExcelToCsv(path='塔木兰沟组',name=i)
#df = pd.read_excel(prepath+"塔木兰沟组数据交集.xlsx",keep_default_na=False, na_values=[""])
#tm=Join(path='Target',name='满克头鄂博组数据交集')
#DataToPlot = pd.read_excel(prepath+'result.xlsx')
#DataToPlot.plot()
#DataToPlot.plot.area()
#plt.figure()
#radviz(DataToPlot , 'Ag109')
#plt.show()
# created by Huang Lu
# 27/08/2016 17:05:45
# Department of EE, Tsinghua Univ.
import cv2
import numpy as np
cap = cv2.VideoCapture(1)
while(1):
# get a frame
ret, frame = cap.read()
# show a frame
cv2.imshow("capture", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
for i in alphatouse:
#tmp = - np.power(np.e,i / max(alphatouse))
#tmp = 1- np.power(i / max(alphatouse),2)
tmp = np.power(np.e,i)
a.append(tmp)
'''
| gpl-3.0 |
tmhm/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under the curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
endrebak/epic | tests/run/test_merge_chip_and_input.py | 1 | 4225 | import pytest
import pandas as pd
import numpy as np
import logging
from io import StringIO
from joblib import delayed, Parallel
@pytest.fixture
def input_data():
pass
@pytest.fixture
def expected_result():
pass
def merge_chip_and_input(windows, nb_cpu):
"""Merge lists of chromosome bin df chromosome-wise.
Returns a list of dataframes, one per chromosome, with the collective count
per bin for all files.
Keyword Arguments:
windows -- OrderedDict where the keys are files, the values are lists of
dfs, one per chromosome.
nb_cpu -- cores to use
"""
windows = iter(windows)
merged = next(windows)
for chromosome_dfs in windows:
merged = merge_two_bin_dfs(merged, chromosome_dfs, nb_cpu)
return merged
# @pytest.mark.unit
# def test_merge_two_bin_files(sample1_dfs, sample2_dfs):
# """TODO: Need to test that the lists might not have the same/all chromosomes.
# It might be possible that there are no sig islands on one chromosome in one
# file, while there are in the others. Solve by taking in dict with chromos
# instead of list with files?
# You will probably be asked about a bug due to this some time.
# """
# print("Read run epic code. Begin there!\n" * 5)
# result = merge_chip_and_input([sample2_dfs, sample2_dfs], 1)
# print(result)
# assert 1
def merge_two_bin_dfs(sample1_dfs, sample2_dfs, nb_cpu):
merged_chromosome_dfs = Parallel(n_jobs=nb_cpu)(
delayed(_merge_two_bin_dfs)(df1, df2)
for df1, df2 in zip(sample1_dfs, sample2_dfs))
return merged_chromosome_dfs
def _merge_two_bin_dfs(df1, df2):
merged_df = df1.merge(df2, how="outer", on=["Chromosome", "Bin"],
suffixes=("_x", "_y"))
merged_df = merged_df.fillna(0)
merged_df["Count"] = merged_df["Count_x"] + merged_df["Count_y"]
merged_df = merged_df.drop(["Count_x", "Count_y"], axis=1)
return merged_df
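# For reference, the merge-and-sum behaviour the helper above is meant to have,
# sketched with made-up bins (column names follow the fixtures below):
# >>> import pandas as pd
# >>> df1 = pd.DataFrame({"Chromosome": ["chrM", "chrM"], "Bin": [0, 200], "Count": [1, 2]})
# >>> df2 = pd.DataFrame({"Chromosome": ["chrM"], "Bin": [200], "Count": [3]})
# >>> m = df1.merge(df2, how="outer", on=["Chromosome", "Bin"], suffixes=("_x", "_y")).fillna(0)
# >>> (m["Count_x"] + m["Count_y"]).tolist()
# [1.0, 5.0]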
@pytest.fixture
def sample1_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
sep="\s+",
header=0), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
@pytest.fixture
def sample2_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
header=0,
sep="\s+", ), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
| mit |
rlpy/rlpy | rlpy/Representations/LocalBases.py | 1 | 7491 | """
Representations which use local bases function (e.g. kernels) distributed
in the statespace according to some scheme (e.g. grid, random, on previous
samples)
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from .Representation import Representation
import numpy as np
from rlpy.Tools.GeneralTools import addNewElementForAllActions
import matplotlib.pyplot as plt
try:
from .kernels import batch
except ImportError:
from .slow_kernels import batch
print("C-Extensions for kernels not available, expect slow runtime")
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
class LocalBases(Representation):
"""
abstract base class for representations that use local basis functions
"""
#: centers of bases
centers = None
#: widths of bases
widths = None
def __init__(self, domain, kernel, normalization=False, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param normalization: (Boolean) If true, normalize feature vector so
that sum( phi(s) ) = 1.
Associates a kernel function with each
"""
self.kernel = batch[kernel.__name__]
self.normalization = normalization
self.centers = np.zeros((0, domain.statespace_limits.shape[0]))
self.widths = np.zeros((0, domain.statespace_limits.shape[0]))
super(LocalBases, self).__init__(domain, seed=seed)
def phi_nonTerminal(self, s):
v = self.kernel(s, self.centers, self.widths)
if self.normalization and not v.sum() == 0.:
# normalize such that each vector has a l1 norm of 1
v /= v.sum()
return v
def plot_2d_feature_centers(self, d1=None, d2=None):
"""
:param d1: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
:param d2: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
Plot the centers of all features in dimensions d1 and d2.
If no dimensions are specified, the first two continuous dimensions
are shown.
"""
if d1 is None and d2 is None:
# just take the first two dimensions
d1, d2 = self.domain.continuous_dims[:2]
plt.figure("Feature Dimensions {} and {}".format(d1, d2))
for i in range(self.centers.shape[0]):
plt.plot([self.centers[i, d1]],
[self.centers[i, d2]], "r", marker="x")
plt.draw()
class NonparametricLocalBases(LocalBases):
def __init__(self, domain, kernel,
max_similarity=0.9, resolution=5, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param max_similarity: threshold to allow feature to be added to
representation. Larger max_similarity makes it \"easier\" to add
more features by permitting larger values of phi(s) before
discarding. (An existing feature function in phi() with large value
at phi(s) implies that it is very representative of the true
function at *s*. i.e., the value of a feature in phi(s) is
inversely related to the \"similarity\" of a potential new feature.
:param resolution: to be used by the ``kernel()`` function, see parent.
Determines the *width* of the basis functions, e.g. sigma in a Gaussian basis.
"""
self.max_similarity = max_similarity
self.common_width = old_div((domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0]), resolution)
self.features_num = 0
super(
NonparametricLocalBases,
self).__init__(
domain,
kernel,
**kwargs)
def pre_discover(self, s, terminal, a, sn, terminaln):
norm = self.normalization
expanded = 0
self.normalization = False
if not terminal:
phi_s = self.phi_nonTerminal(s)
if np.all(phi_s < self.max_similarity):
self._add_feature(s)
expanded += 1
if not terminaln:
phi_s = self.phi_nonTerminal(sn)
if np.all(phi_s < self.max_similarity):
self._add_feature(sn)
expanded += 1
self.normalization = norm
return expanded
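# Illustration of the acceptance test in pre_discover above: a state becomes a
# new center only if every existing feature evaluates below max_similarity
# there. A hedged sketch with a plain Gaussian kernel (the real `kernel` is
# whichever batch kernel was passed to the constructor):
# >>> import numpy as np
# >>> centers, width, max_similarity = np.array([0.0, 1.0]), 0.25, 0.9
# >>> phi = lambda s: np.exp(-0.5 * ((s - centers) / width) ** 2)
# >>> bool(np.all(phi(0.98) < max_similarity))  # too close to an existing center
# False
# >>> bool(np.all(phi(0.5) < max_similarity))   # far from all centers -> add it
# True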
def _add_feature(self, center):
self.features_num += 1
self.centers = np.vstack((self.centers, center))
self.widths = np.vstack((self.widths, self.common_width))
# TODO if normalized, use Q estimate for center to fill weight_vec
new = np.zeros((self.domain.actions_num, 1))
self.weight_vec = addNewElementForAllActions(
self.weight_vec,
self.domain.actions_num,
new)
class RandomLocalBases(LocalBases):
def __init__(self, domain, kernel, num=100, resolution_min=5,
resolution_max=None, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param num: Total number of feature (kernel) functions to scatter
across the domain, i.e. ``features_num = num``.
:param resolution_min: resolution selected uniform random, lower bound.
:param resolution_max: resolution selected uniform random, upper bound.
:param seed: the random seed to use when scattering basis functions.
Randomly scatter ``num`` feature functions throughout the domain, with
sigma / noise parameter selected uniform random between
``resolution_min`` and ``resolution_max``. NOTE these are
sensitive to the choice of coordinate (scale with coordinate units).
"""
self.features_num = num
self.dim_widths = (domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0])
self.resolution_max = resolution_max
self.resolution_min = resolution_min
super(
RandomLocalBases,
self).__init__(
domain,
kernel,
seed=seed,
**kwargs)
self.centers = np.zeros((num, len(self.dim_widths)))
self.widths = np.zeros((num, len(self.dim_widths)))
self.init_randomization()
def init_randomization(self):
for i in range(self.features_num):
for d in range(len(self.dim_widths)):
self.centers[i, d] = self.random_state.uniform(
self.domain.statespace_limits[d, 0],
self.domain.statespace_limits[d, 1])
self.widths[i, d] = self.random_state.uniform(
old_div(self.dim_widths[d], self.resolution_max),
old_div(self.dim_widths[d], self.resolution_min))
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/numpy/lib/function_base.py | 7 | 134697 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
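# The uniform-bin fast path above boils down to mapping each value to an integer
# bin index and counting with np.bincount; a small illustrative equivalence
# check (made-up data, public API only):
# >>> import numpy as np
# >>> a = np.array([0.1, 0.4, 0.4, 0.9])
# >>> nbins, mn, mx = 4, 0.0, 1.0
# >>> idx = ((a - mn) * (nbins / (mx - mn))).astype(np.intp)
# >>> idx[idx == nbins] -= 1
# >>> np.bincount(idx, minlength=nbins).tolist()
# [1, 2, 0, 1]
# >>> np.histogram(a, bins=nbins, range=(mn, mx))[0].tolist()
# [1, 2, 0, 1]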
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
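# The weighted branch above reduces to the familiar closed form
# sum(a * w) / sum(w); a quick illustrative check (arbitrary values):
# >>> import numpy as np
# >>> a, w = np.array([1.0, 2.0, 4.0]), np.array([3.0, 1.0, 1.0])
# >>> bool(abs(np.average(a, weights=w) - (a * w).sum() / w.sum()) < 1e-12)
# True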
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first-order one-sided differences at the boundaries
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
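# A minimal illustrative sketch (the helper name is hypothetical, not part of
# NumPy): the second-order one-sided stencils used above are exact for
# quadratics, so ``edge_order=2`` recovers the derivative of ``x**2`` at the
# boundaries, while the default first-order edges do not.
def _gradient_edge_order_sketch():
    import numpy as np
    x = np.arange(5.0)
    y = x ** 2                            # exact derivative is 2*x
    g1 = np.gradient(y, edge_order=1)     # array([1., 2., 4., 6., 7.])
    g2 = np.gradient(y, edge_order=2)     # array([0., 2., 4., 6., 8.])
    return g1, g2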
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
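# A minimal illustrative sketch (hypothetical helper, not part of NumPy):
# ``n > 1`` is handled by recursion on the first difference, so ``diff(a, n=2)``
# equals two successive first differences, and the second difference of a
# quadratic sequence is constant.
def _diff_recursion_sketch():
    import numpy as np
    a = np.arange(6) ** 2                 # 0, 1, 4, 9, 16, 25
    d_manual = np.diff(np.diff(a))        # first difference applied twice
    d_n2 = np.diff(a, n=2)                # same result in one call
    return d_manual, d_n2                 # both are array([2, 2, 2, 2])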
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
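# A minimal illustrative sketch (hypothetical helper): the ``period`` branch
# above wraps the coordinates into one period, sorts the table, and pads one
# point on each side, so a periodic call is equivalent to interpolating against
# a manually extended table.
def _interp_period_sketch():
    import numpy as np
    xp = np.array([190.0, -190.0, 350.0, -350.0]) % 360
    fp = np.array([5.0, 10.0, 3.0, 4.0])
    order = np.argsort(xp)
    xp, fp = xp[order], fp[order]
    xp_ext = np.concatenate((xp[-1:] - 360, xp, xp[:1] + 360))
    fp_ext = np.concatenate((fp[-1:], fp, fp[:1]))
    x = np.array([-180.0, -5.0, 365.0]) % 360
    manual = np.interp(x, xp_ext, fp_ext)
    builtin = np.interp([-180.0, -5.0, 365.0], [190, -190, 350, -350],
                        [5, 10, 3, 4], period=360)
    return manual, builtin                # both give [7.5, 3.25, 3.75]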
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
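# A minimal illustrative sketch (hypothetical helper): wrapping a smooth phase
# into [-pi, pi) introduces jumps of about 2*pi, and ``unwrap`` removes exactly
# those jumps by adding back the 2*pi complement computed above.
def _unwrap_sketch():
    import numpy as np
    ramp = np.linspace(0, 6 * np.pi, 30)                 # smoothly increasing phase
    wrapped = np.mod(ramp + np.pi, 2 * np.pi) - np.pi    # wrapped into [-pi, pi)
    recovered = np.unwrap(wrapped)
    return np.allclose(recovered, ramp)                  # True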
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
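# A minimal illustrative sketch (hypothetical helper): when ``otypes`` is not
# given, ``_get_ufunc_and_otypes`` probes ``pyfunc`` on the first element to
# discover the output dtype, so the function runs one extra time; supplying
# ``otypes`` avoids that probe call.
def _vectorize_call_count_sketch():
    import numpy as np
    calls = {'n': 0}
    def f(v):
        calls['n'] += 1
        return v + 1
    np.vectorize(f)([1, 2, 3])                     # probe + 3 element calls
    with_probe = calls['n']                        # 4
    calls['n'] = 0
    np.vectorize(f, otypes=[np.intp])([1, 2, 3])   # dtype already known
    without_probe = calls['n']                     # 3
    return with_probe, without_probe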
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
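# A minimal illustrative sketch (hypothetical helper): with frequency weights
# only (``aweights`` absent, so ``a == 1`` and ``v2 == v1``), the normalization
# above reduces to ``1 / (sum(f) - ddof)``, so a hand-rolled weighted covariance
# should match ``np.cov`` with the same ``fweights``.
def _cov_fweights_sketch():
    import numpy as np
    m = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    f = np.array([1, 2, 1])
    v1 = float(f.sum())
    centred = m - (m * f).sum(axis=1, keepdims=True) / v1
    manual = np.dot(centred * f, centred.T) / (v1 - 1)   # default ddof is 1
    return np.allclose(manual, np.cov(m, fweights=f))    # True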
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
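# A minimal illustrative sketch (hypothetical helper): ``corrcoef`` is the
# covariance matrix rescaled by the outer product of the standard deviations,
# exactly the R_ij = C_ij / sqrt(C_ii * C_jj) formula quoted above.
def _corrcoef_from_cov_sketch():
    import numpy as np
    x = np.array([[-2.1, -1.0, 4.3], [3.0, 1.1, 0.12]])
    c = np.cov(x)
    d = np.sqrt(np.diag(c))
    manual = c / np.outer(d, d)
    return np.allclose(manual, np.corrcoef(x))    # True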
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
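# A minimal illustrative sketch (hypothetical helper): evaluating the three-term
# cosine sum used in the implementation above (with denominator M - 1)
# reproduces the window returned by ``np.blackman``.
def _blackman_formula_sketch():
    import numpy as np
    M = 12
    n = np.arange(M)
    manual = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
                   + 0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
    return np.allclose(manual, np.blackman(M))    # True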
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
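# A minimal illustrative sketch (hypothetical helper): ``i0`` mirrors negative
# arguments onto the positive axis (``x[ind] = -x[ind]`` above) before choosing
# between the two Chebyshev expansions, so the result is even in x.
def _i0_symmetry_sketch():
    import numpy as np
    return np.allclose(np.i0([-3.0, -0.5]), np.i0([3.0, 0.5]))   # True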
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
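# A minimal illustrative sketch (hypothetical helper): with ``beta = 0`` the
# ratio above is I0(0)/I0(0) == 1 at every sample, which is the "Rectangular"
# row of the beta table in the docstring.
def _kaiser_beta_zero_sketch():
    import numpy as np
    return np.allclose(np.kaiser(8, 0.0), np.ones(8))   # True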
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
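# A minimal illustrative sketch (hypothetical helper): the ``where(x == 0, ...)``
# substitution above makes the removable singularity at zero evaluate to 1,
# while the other integers fall on the zeros of sin(pi*x).
def _sinc_zeros_sketch():
    import numpy as np
    vals = np.sinc(np.array([0.0, 1.0, 2.0, -3.0]))
    return np.allclose(vals, [1.0, 0.0, 0.0, 0.0])   # True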
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving an axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
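# A minimal illustrative sketch (hypothetical helper): ``_median`` only needs a
# partial sort -- it partitions around the one (odd length) or two (even length)
# middle positions and averages them, which is what ``np.partition`` exposes
# publicly.
def _median_partition_sketch():
    import numpy as np
    a = np.array([7.0, 1.0, 9.0, 3.0])
    kth = [a.size // 2 - 1, a.size // 2]          # even length: two middle positions
    part = np.partition(a, kth)
    manual = 0.5 * (part[kth[0]] + part[kth[1]])
    return manual == np.median(a)                 # True, both are 5.0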
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
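# Hedged illustration of the 'linear' interpolation rule documented above: the
# target index into the sorted data is q/100 * (Nx - 1), and the result
# interpolates between the neighbouring order statistics.  The helper name is
# invented for this sketch; percentile itself goes through _percentile below.
def _linear_percentile_demo(values, q):
    import numpy as np
    a = np.sort(np.asarray(values, dtype=float))
    idx = (q / 100.0) * (a.size - 1)
    lo = int(np.floor(idx))
    hi = min(lo + 1, a.size - 1)
    frac = idx - lo
    # e.g. values=[1, 2, 3, 4], q=50 -> idx=1.5 -> 2 + 0.5*(3 - 2) = 2.5
    return a[lo] + (a[hi] - a[lo]) * frac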
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
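# Sketch of the NaN-detection trick _percentile and _median rely on: asking
# np.partition for kth == -1 moves the largest element to the end, and since
# NaN sorts after every number, a NaN in the data will land in that last slot.
# Checking one element therefore reveals NaNs without a separate full scan.
# Illustrative only; the production code above also handles axes and `out`.
def _nan_at_end_demo(values):
    import numpy as np
    a = np.asarray(values, dtype=float)
    part = np.partition(a, -1)
    return bool(np.isnan(part[-1]))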
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
        The axis along which to integrate. Default is the last axis (-1).
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- the y-axis locations of the
    points are taken from the `y` array; by default the x-axis distances
    between points are 1.0, but they can alternatively be provided via the
    `x` array or the `dx` scalar. The return value equals the combined area
    under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
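# A hand-rolled 1-D version of the composite trapezoidal rule computed above,
# kept deliberately simple to make the summation d * (y[i] + y[i+1]) / 2
# concrete; trapz itself additionally handles N-D arrays and broadcasting.
# The helper name is illustrative only.
def _trapz_1d_demo(y, x=None, dx=1.0):
    import numpy as np
    y = np.asarray(y, dtype=float)
    if x is None:
        d = np.full(y.size - 1, dx, dtype=float)
    else:
        d = np.diff(np.asarray(x, dtype=float))
    # e.g. _trapz_1d_demo([1, 2, 3]) == 4.0, matching the docstring example
    return float(np.sum(d * (y[1:] + y[:-1]) / 2.0))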
# always succeeds
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
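# Hypothetical usage of add_newdoc defined above.  The module and attribute
# names below are placeholders, not real targets; because add_newdoc swallows
# every error, calling it with names that do not resolve is a silent no-op --
# which is exactly why its docstring tells callers to verify the result.
def _add_newdoc_usage_demo():
    add_newdoc('some_package.some_module', 'SomeClass',
               'Class-level docstring (placeholder).')
    add_newdoc('some_package.some_module', 'SomeClass',
               [('method_a', 'Docstring for method_a (placeholder).'),
                ('method_b', 'Docstring for method_b (placeholder).')])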
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
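# Small sketch reinforcing the Notes above: with inputs of length M and N,
# indexing='xy' yields (N, M)-shaped outputs while indexing='ij' yields (M, N).
# Purely illustrative; it simply calls the meshgrid defined in this module.
def _meshgrid_indexing_demo():
    import numpy as np
    x = np.arange(3)                                  # M = 3
    y = np.arange(2)                                  # N = 2
    xv_xy, _ = meshgrid(x, y, indexing='xy')
    xv_ij, _ = meshgrid(x, y, indexing='ij')
    return xv_xy.shape, xv_ij.shape                   # (2, 3) and (3, 2)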
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
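# Demonstration of the boolean-mask alternative recommended in the Notes of
# delete above: building a mask once lets it be reused, whereas delete always
# copies.  The helper name is invented for this sketch.
def _delete_vs_mask_demo():
    import numpy as np
    arr = np.arange(6)
    mask = np.ones(arr.size, dtype=bool)
    mask[[0, 2, 4]] = False
    # arr[mask] and np.delete(arr, [0, 2, 4]) both give array([1, 3, 5])
    return np.array_equal(arr[mask], np.delete(arr, [0, 2, 4]))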
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
        # turn the slice into an explicit array of indices
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
        # very differently from a[:,[0],:] = ...! This changes values so that
        # it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
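# Sketch of the scalar-versus-sequence equivalence shown in the docstring
# examples above: a scalar index broadcasts flat `values` down the new column,
# while a one-element sequence requires `values` already shaped as a column.
# Demo helper only.
def _insert_scalar_vs_sequence_demo():
    import numpy as np
    a = np.array([[1, 1], [2, 2], [3, 3]])
    col_scalar = np.insert(a, 1, [1, 2, 3], axis=1)
    col_sequence = np.insert(a, [1], [[1], [2], [3]], axis=1)
    return np.array_equal(col_scalar, col_sequence)   # True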
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
        filled. If `axis` is None, the result is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
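# Brief reminder of the two modes documented above: with axis=None both inputs
# are flattened before concatenation, while a given axis requires matching
# shapes along the other dimensions.  Demo helper only.
def _append_modes_demo():
    import numpy as np
    flat = np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])               # shape (9,)
    stacked = np.append([[1, 2, 3]], [[4, 5, 6], [7, 8, 9]], axis=0)  # shape (3, 3)
    return flat.shape, stacked.shape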
| gpl-2.0 |
Agent007/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
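# The `normalize` and `one_hot_encode` arguments used above are supplied by the
# calling notebook; they are not defined in this helper module.  The functions
# below are only a plausible sketch of what they might look like (an
# assumption, not the project's actual implementations).
def _example_normalize(x):
    # Scale CIFAR-10 pixel values from [0, 255] down to [0, 1]
    return x / 255.0
def _example_one_hot_encode(labels, n_classes=10):
    # Turn integer class ids into one-hot rows
    one_hot = np.zeros((len(labels), n_classes))
    one_hot[np.arange(len(labels)), labels] = 1
    return one_hot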
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
    for image_i, (feature, label_id, pred_indices, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indices]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
JanNash/sms-tools | lectures/06-Harmonic-model/plots-code/spectral-peaks.py | 22 | 1161 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8 * fs)  # integer sample index so the slice below is valid
hN = N // 2
hM = (M + 1) // 2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis, pX,'c', lw=1.5)
plt.axis([0,7000, min(pX),10])
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
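# For reference, a sketch of the parabolic (quadratic) peak interpolation that
# UF.peakInterp is expected to perform: fit a parabola through the peak bin and
# its two neighbours and take its vertex.  This is an assumption about the
# helper's internals, included only to make the idea concrete.
def parabolic_peak_demo(mag, p):
    # mag: magnitude spectrum in dB, p: index of a local maximum (0 < p < len(mag)-1)
    a, b, c = mag[p-1], mag[p], mag[p+1]
    iploc = p + 0.5 * (a - c) / (a - 2 * b + c)   # interpolated bin location
    ipmag = b - 0.25 * (a - c) * (iploc - p)      # interpolated peak magnitude
    return iploc, ipmag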
| agpl-3.0 |
rosswhitfield/mantid | qt/python/mantidqt/widgets/sliceviewer/test/test_sliceviewer_presenter.py | 3 | 24997 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import sys
import unittest
from unittest import mock
from unittest.mock import patch
from mantid.api import MultipleExperimentInfos
import matplotlib
matplotlib.use('Agg')
# Mock out simpleapi to avoid an expensive import of something we don't use anyway
sys.modules['mantid.simpleapi'] = mock.MagicMock()
from mantidqt.widgets.sliceviewer.model import SliceViewerModel, WS_TYPE # noqa: E402
from mantidqt.widgets.sliceviewer.presenter import ( # noqa: E402
PeaksViewerCollectionPresenter, SliceViewer)
from mantidqt.widgets.sliceviewer.transform import NonOrthogonalTransform # noqa: E402
from mantidqt.widgets.sliceviewer.toolbar import ToolItemText # noqa: E402
from mantidqt.widgets.sliceviewer.view import SliceViewerView, SliceViewerDataView # noqa: E402
def _create_presenter(model, view, mock_sliceinfo_cls, enable_nonortho_axes, supports_nonortho):
model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
model.is_ragged_matrix_plotted.return_value = False
model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
data_view_mock = view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=model, view=view)
if enable_nonortho_axes:
data_view_mock.nonorthogonal_mode = True
data_view_mock.nonortho_transform = mock.MagicMock(NonOrthogonalTransform)
data_view_mock.nonortho_transform.tr.return_value = (0, 1)
presenter.nonorthogonal_axes(True)
else:
data_view_mock.nonorthogonal_mode = False
data_view_mock.nonortho_transform = None
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
mock_sliceinfo_instance = mock_sliceinfo_cls.return_value
mock_sliceinfo_instance.can_support_nonorthogonal_axes.return_value = supports_nonortho
return presenter, data_view_mock
def create_workspace_mock():
# Mock out workspace methods needed for SliceViewerModel.__init__
workspace = mock.Mock(spec=MultipleExperimentInfos)
workspace.isMDHistoWorkspace = lambda: False
workspace.getNumDims = lambda: 2
workspace.name = lambda: "workspace"
return workspace
class SliceViewerTest(unittest.TestCase):
def setUp(self):
self.view = mock.Mock(spec=SliceViewerView)
data_view = mock.Mock(spec=SliceViewerDataView)
data_view.plot_MDH = mock.Mock()
data_view.dimensions = mock.Mock()
data_view.norm_opts = mock.Mock()
data_view.image_info_widget = mock.Mock()
data_view.canvas = mock.Mock()
data_view.nonorthogonal_mode = False
data_view.nonortho_transform = None
data_view.get_axes_limits.return_value = None
dimensions = mock.Mock()
dimensions.get_slicepoint.return_value = [None, None, 0.5]
dimensions.transpose = False
dimensions.get_slicerange.return_value = [None, None, (-15, 15)]
dimensions.qflags = [True, True, True]
data_view.dimensions = dimensions
self.view.data_view = data_view
self.model = mock.Mock(spec=SliceViewerModel)
self.model.get_ws = mock.Mock()
self.model.get_data = mock.Mock()
self.model.rebin = mock.Mock()
self.model.workspace_equals = mock.Mock()
self.model.get_properties.return_value = {
"workspace_type": "WS_TYPE.MATRIX",
"supports_normalise": True,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDH(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDE(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_matrix(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_normalization_change_set_correct_normalization(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.view.data_view.plot_matrix = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
presenter.normalization_changed("By bin width")
self.view.data_view.plot_matrix.assert_called_with(self.model.get_ws(), distribution=False)
def peaks_button_disabled_if_model_cannot_support_it(self):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = False
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_called_once_with(ToolItemText.OVERLAY_PEAKS)
def peaks_button_not_disabled_if_model_can_support_it(self):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = True
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_non_orthogonal_axes_toggled_on(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
self.model.is_ragged_matrix_plotted.return_value = False
data_view_mock = self.view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
presenter.nonorthogonal_axes(True)
data_view_mock.deactivate_and_disable_tool.assert_called_once_with(
ToolItemText.REGIONSELECTION)
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.create_axes_orthogonal.assert_not_called()
self.assertEqual(data_view_mock.plot_MDH.call_count, 2)
data_view_mock.disable_tool_button.assert_has_calls([mock.call(ToolItemText.LINEPLOTS)])
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_non_orthogonal_axes_toggled_off(self, mock_sliceinfo_cls, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
data_view_mock.enable_tool_button.reset_mock()
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.remove_line_plots.reset_mock()
presenter.nonorthogonal_axes(False)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
data_view_mock.plot_MDH.assert_called_once()
data_view_mock.enable_tool_button.assert_has_calls(
(mock.call(ToolItemText.LINEPLOTS), mock.call(ToolItemText.REGIONSELECTION)))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_MD(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = False
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_called_once_with([None, None, 0.5],
data_view.dimensions.transpose)
data_view.get_full_extent.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_ragged_matrix(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = True
self.view.data_view.get_full_extent.return_value = [-1, 1, -2, 2]
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_creates_new_plot_if_dynamic_rebinning_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = True
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_does_not_create_new_plot_if_dynamic_rebinning_not_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = False
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_switches_to_ortho_when_dim_not_Q(
self, mock_sliceinfo_cls, is_view_delete):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_keeps_nonortho_when_dim_is_Q(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.disable_tool_button.assert_not_called()
data_view_mock.create_axes_orthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_disables_nonortho_btn_if_not_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_enables_nonortho_btn_if_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.enable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.peaksviewer.presenter.TableWorkspaceDataPresenterStandard")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_overlay_peaks_workspaces_attaches_view_and_draws_peaks(self, mock_peaks_presenter, *_):
for nonortho_axes in (False, True):
presenter, _ = _create_presenter(self.model, self.view, mock.MagicMock(), nonortho_axes,
nonortho_axes)
presenter.view.query_peaks_to_overlay.side_effect = ["peaks_workspace"]
presenter.overlay_peaks_workspaces()
presenter.view.query_peaks_to_overlay.assert_called_once()
mock_peaks_presenter.assert_called_once()
mock_peaks_presenter.overlay_peaksworkspaces.asssert_called_once()
mock_peaks_presenter.reset_mock()
presenter.view.query_peaks_to_overlay.reset_mock()
@patch("sip.isdeleted", return_value=False)
def test_gui_starts_with_zoom_selected(self, _):
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.activate_tool.assert_called_once_with(ToolItemText.ZOOM)
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_returns_when_the_workspace_is_not_the_model_workspace(self, _):
self.model.workspace_equals.return_value = False
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.update_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
other_workspace = mock.Mock()
presenter.replace_workspace('other_workspace', other_workspace)
presenter._decide_plot_update_methods.assert_not_called()
presenter.update_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_closes_view_when_model_properties_change(self, _):
self.model.workspace_equals.return_value = True
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.refresh_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
workspace = create_workspace_mock()
# Not equivalent to self.model.get_properties()
new_model_properties = {
"workspace_type": "WS_TYPE.MDE",
"supports_normalise": False,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_called_once()
presenter._decide_plot_update_methods.assert_not_called()
presenter.refresh_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_updates_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
self.view.delayed_refresh = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock(
return_value=(presenter.new_plot_matrix(), presenter.update_plot_data_matrix()))
workspace = create_workspace_mock()
new_model_properties = self.model.get_properties()
# Patch get_properties so that the properties of the new model match those of self.model
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_not_called()
presenter._decide_plot_update_methods.assert_called_once()
self.view.delayed_refresh.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_refresh_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_called()
self.view.setWindowTitle.assert_called_with(self.model.get_title())
presenter.new_plot.assert_called_once()
@patch("sip.isdeleted", return_value=True)
def test_refresh_view_does_nothing_when_view_deleted(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_not_called()
presenter.new_plot.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_not_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = mock.MagicMock()
presenter.clear_observer()
presenter._peaks_presenter.clear_observer.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_is_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = None
# Will raise exception if misbehaving.
presenter.clear_observer()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_peak_add_delete_event(self, mock_peaks_presenter, mock_sliceinfo_cls, _):
mock_sliceinfo_cls().inverse_transform = mock.Mock(side_effect=lambda pos: pos[::-1])
mock_sliceinfo_cls().z_value = 3
presenter, _ = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter._peaks_presenter = mock_peaks_presenter
event = mock.Mock()
event.inaxes = True
event.xdata = 1.0
event.ydata = 2.0
presenter.add_delete_peak(event)
mock_sliceinfo_cls.get_sliceinfo.assert_not_called()
mock_peaks_presenter.add_delete_peak.assert_called_once_with([3, 2, 1])
self.view.data_view.canvas.draw_idle.assert_called_once()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |