repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
voxlol/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue, as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
bottler/iisignature | src/gcc/mem_profile_analyse.py | 1 | 3795 | import sqlite3
con = sqlite3.connect("mem_profile.sqlite")
if 0:
#Check if there are duplicates
q="select count(*) as AA from A group by method,basis,d,m having AA>1"
print (con.execute(q).fetchall())
q="select count(*) from A"
print (con.execute(q).fetchall())
q="select * from A order by method,basis,d,m"
all=con.execute(q).fetchall()
i=0
for line in all:
i+=1
print (line)
print(i)
exit(0)
import tabulate, sqlite3, numpy
import numpy as np
#import grapher
#https://www.bastibl.net/publication-quality-plots/
import matplotlib as mpl
save=1
if save:
mpl.use("pdf")
import matplotlib.pyplot as plt
useLibertine = True
plt.rc('text', usetex=True)
if useLibertine:
pre="""
\usepackage[T1]{fontenc}
\usepackage[tt=false,type1=true]{libertine}
%\setmonofont{inconsolata}
\usepackage[varqu]{zi4}
\usepackage[libertine]{newtxmath}
"""
plt.rcParams['text.latex.preamble'] = pre #'\usepackage{libertine},\usepackage[libertine]{newtxmath},\usepackage{sfmath},\usepackage[T1]{fontenc}'
else:
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'monospace' : ['Computer Modern']})
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('legend', fontsize=8)
plt.rc('axes', labelsize=8)
mylinewidth=0.6
plt.rc('axes',linewidth=mylinewidth)#default 1
width = 3.487
height = width / 1.618
fig, ax = plt.subplots()
fig.subplots_adjust(left=.15, bottom=.16, right=.99, top=.97)
a=con.execute("select BYTES, D,M,METHOD,BASIS from A").fetchall()
b=[i for i in a]
bch_extra=0
def get_basic(d,m,method,basis):
found=False
for i in b:
if (d,m,method,basis)==(i[1],i[2],i[3],i[4]):
if found:
if usage != i[0]:
raise RuntimeError("two inconsistent copies of "+str((d,m,method)))
else:
found=True
usage=i[0]
if not found:
raise RuntimeError("no copies of "+str((d,m,method)))
return usage
#bch_extra=get_basic(0,0,"C","L")
def get(d,m,method,basis):
usage = get_basic(d,m,method,basis)
if 0:
if "C" == method:
return usage-get_basic(0,0,"C","L")
return usage
return usage-get_basic(d,1,method,basis)
#print (tabulate.tabulate(b,headers=["method","d","m", "reps","time"]))
Order = ["C","S"]
d=3
max_m={2:11,3:10,4:6,5:6}[d]
x=list(range(2,1+max_m))
y=[[get(d,m,method,"L") for m in x] for method in Order]
#series = [i for j in [[x,k,"+"] for k in y] for i in j]
#plt.plot(*series)
#d,x,o,8 and all the triangles look bigger , 8 and o look identical
#.,+,* look smaller
#These are the symbols and colours for the timings
#graph which uses the order C/O/S/esig
#so we just need elements 0 and 2
symbols=["v","o","x","d"]
colours=["r","b","g","k"]
symbols=(symbols[0],symbols[2])
colours=(colours[0],colours[2])
for i,nm,symbol,col in zip(y,Order,symbols,colours):
#plt.plot(x,i,symbol,label=nm)
nm1=r"\verb|"+nm+"|"
plt.scatter(x,i,c=col,marker=symbol,label=nm1,edgecolors='none')
#prop=mpl.font_manager.FontProperties("monospace")
legend=plt.legend(loc="upper left")
legend.get_frame().set_linewidth(mylinewidth)
plt.xlabel('level')
plt.yscale('log')
#plt.xlim(1.5,max_m+0.5)
plt.ylabel('usage(bytes) - logarithmic scale')
#grapher.pushplot(plt)
#plt.draw()
#dpi doesn't change the scale in the picture
filename = "/home/jeremyr/Dropbox/phd/graphs/memsweep"+str(d)+"d"+("Lib" if useLibertine else "")
if save:
fig.set_size_inches(width,height)
plt.savefig(filename+".pdf")
else:
plt.show()
#run this to check that the graph looks good in monochrome
if 0:
#plt.savefig(filename+".png",dpi=300)
from PIL import Image
img = Image.open(filename+".png").convert('LA')
img.save(filename+"bw.png")
| mit |
PascalSteger/twiddle | analysis/plot_abundances.py | 1 | 1071 | #!/usr/bin/env python3
## \file
# plot abundances from cat rectest.log|grep "Y:"|cut -d":" -f2|pr -s -t -l9|tee rectest.col
import sys
infile = sys.argv[1]; outfile = sys.argv[2]
from matplotlib import pyplot as PLT
fig = PLT.figure()
ax1 = fig.add_subplot(111)
import numpy as NP
with open(infile) as f:
v = NP.loadtxt(f, dtype='float', comments="#", skiprows=0, unpack=True)#delimiter=",", usecols=[col]
print(v)
import math
import scipy
z = 1/v[0]-1
PLT.plot(z,v[1],c='black',label='e')
PLT.plot(z,v[2],c='red',label='HI')
PLT.plot(z,v[3],c='orange',label='HII')
PLT.plot(z,v[4],c='green',label='HeI')
PLT.plot(z,v[5],c='cyan',label='HeII')
PLT.plot(z,v[6],c='violet',label='HeIII')
PLT.plot(z,v[7],c='black',label='H-')
PLT.plot(z,v[8],c='blue',label='H2')
PLT.plot(z,v[9],c='brown',label='H2*')
PLT.xscale('log'); PLT.yscale('log')
PLT.xlim(8e1,10**4)
PLT.ylim(10**-15,10**1)
PLT.xlabel(r'z')
#PLT.xticks(NP.logspace(0,2,3),['1','10','100'])
PLT.ylabel(r'$n_i/n_{(HI+HII)}$')
#PLT.yticks(NP.logspace(-7,3,6))
PLT.legend(loc=2)
PLT.savefig(outfile)
| gpl-2.0 |
astocko/statsmodels | statsmodels/examples/ex_kernel_singleindex_dgp.py | 34 | 3438 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
class UnivariateFunc1a(dgp.UnivariateFunc1):
def het_scale(self, x):
return 0.5
seed = np.random.randint(999999)
#seed = 430973
#seed = 47829
seed = 648456 #good seed for het_scale = 0.5
print(seed)
np.random.seed(seed)
nobs, k_vars = 300, 3
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
xb = x.sum(1) / 3 #beta = [1,1,1]
funcs = [#dgp.UnivariateFanGijbels1(),
#dgp.UnivariateFanGijbels2(),
#dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
UnivariateFunc1a(x=xb)
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
# mod0 = smke.SingleIndexModel(endog=[f.y], exog=[xb], #reg_type='ll',
# var_type='c')#, bw='cv_ls')
# mean0, mfx0 = mod0.fit()
model = smke.SingleIndexModel(endog=[f.y], exog=x, #reg_type='ll',
var_type='ccc')#, bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(1, 1, i+1)
f.plot(ax=ax)
xb_est = np.dot(model.exog, model.b)
sortidx = np.argsort(xb_est) #f.x)
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
# ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
alpha = 0.7
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
fig = plt.figure()
# ax = fig.add_subplot(1, 2, 1)
# ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
# ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
# ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
# ax.legend(loc='upper left')
sortidx0 = np.argsort(xb)
ax = fig.add_subplot(1, 2, 1)
ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (sorted by true xb)')
ax = fig.add_subplot(1, 2, 2)
ax.plot(f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (sorted by estimated xb)')
plt.show()
| bsd-3-clause |
evgchz/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 7 | 40770 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
"""Checks the SGDClassifier correctly computes the average weights"""
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
"""partial_fit with class_weight='auto' not supported"""
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
"""Multi-class average test case"""
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto").fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
# build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
"""Partial_fit should work after initial fit in the multiclass case.
Non-regression test for #2496; fit would previously produce a
Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
"""
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
"""Tests the average regressor matches the naive implementation"""
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
"""Tests whether the partial fit yields the same average as the fit"""
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
"""Checks the average weights on data with 0s"""
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD output is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDRegressor
def test_l1_ratio():
"""Test if l1 ratio extremes match L1 and L2 penalty settings. """
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
# Generate some weird data with unscaled features
rng = np.random.RandomState(42)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, 0] *= 100
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(scale(X), ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(scale(X), y)
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 68 | 43439 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
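# The error message above also documents the supported workaround. A rough
# sketch of what that usage could look like (illustrative only, not executed
# here; it assumes sklearn.utils.class_weight.compute_class_weight is
# available, as the message itself suggests):
#
#     from sklearn.utils.class_weight import compute_class_weight
#     classes = np.unique(Y)
#     weights = compute_class_weight('balanced', classes, Y)
#     clf = self.factory(class_weight=dict(zip(classes, weights)))
#     clf.partial_fit(X, Y, classes=classes)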
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
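# For reference, the modified_huber probability mapping works roughly as
# follows (a sketch of the idea, not the library's exact code): each per-class
# decision value d is squashed with (clip(d, -1, 1) + 1) / 2 and each row is
# then renormalized to sum to one; rows where every squashed value is zero
# (all d <= -1, the case checked above) fall back to uniform 1 / n_classes:
#
#     prob = (np.clip(d, -1, 1) + 1.0) / 2.0
#     prob_sum = prob.sum(axis=1)
#     prob[prob_sum == 0, :] = 1.0
#     prob_sum[prob_sum == 0] = prob.shape[1]
#     prob /= prob_sum[:, np.newaxis]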
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit the same balanced model again to check the improvement is stable
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
ptr1 = clf.coef_.ctypes.data
clf.partial_fit(X[third:], Y[third:])
ptr2 = clf.coef_.ctypes.data
# check that the coef_ buffer hasn't been re-allocated
assert_equal(ptr1, ptr2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
ptr1 = clf.coef_.ctypes.data
clf.partial_fit(X2[third:], Y2[third:])
ptr2 = clf.coef_.ctypes.data
# check that the coef_ buffer hasn't been re-allocated
assert_equal(ptr1, ptr2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
ptr1 = clf.coef_.ctypes.data
clf.partial_fit(X[third:], Y[third:])
ptr2 = clf.coef_.ctypes.data
# check that the coef_ buffer hasn't been re-allocated
assert_equal(ptr1, ptr2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
# set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
# set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
# sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
# make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
# make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
# set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by estimates.
The usual maximum likelihood estimate of the covariance is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is therefore better to use a
robust estimator of covariance, which guarantees that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking and clustering.
For visualization purposes, the cube root of the Mahalanobis distances
is shown in the boxplots, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
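# As a quick sanity check of the formula quoted in the docstring above, the
# squared Mahalanobis distance of a point x to (mu, Sigma) can be computed
# directly with numpy. This small helper is illustrative only and is not part
# of the original example:
def _squared_mahalanobis(x, mu, cov):
    # d^2 = (x - mu)' Sigma^{-1} (x - mu)
    diff = np.asarray(x, dtype=float) - np.asarray(mu, dtype=float)
    return float(diff.dot(np.linalg.solve(cov, diff)))

# e.g. _squared_mahalanobis([1., 2.], np.zeros(2), np.eye(2)) gives 5.0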
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
basnijholt/holoviews | holoviews/tests/core/testoptions.py | 1 | 50095 | import os
import pickle
from unittest import SkipTest
import numpy as np
from holoviews import Store, Histogram, Image, Curve, DynamicMap, opts
from holoviews.core.options import (
OptionError, Cycle, Options, OptionTree, StoreOptions, options_policy
)
from holoviews.element.comparison import ComparisonTestCase
from holoviews import plotting # noqa Register backends
Options.skip_invalid = False
try:
# Needed a backend to register backend and options
from holoviews.plotting import mpl # noqa
except:
pass
try:
# Needed to register backend and options
from holoviews.plotting import bokeh # noqa
except:
pass
class TestOptions(ComparisonTestCase):
def setUp(self):
self.original_option_groups = Options._option_groups
Options._option_groups = ['test']
super(TestOptions, self).setUp()
def tearDown(self):
Options._option_groups = self.original_option_groups
super(TestOptions, self).tearDown()
def test_options_init(self):
Options('test')
def test_options_valid_keywords1(self):
opts = Options('test', allowed_keywords=['kw1'], kw1='value')
self.assertEqual(opts.kwargs, {'kw1':'value'})
def test_options_valid_keywords2(self):
opts = Options('test', allowed_keywords=['kw1', 'kw2'], kw1='value')
self.assertEqual(opts.kwargs, {'kw1':'value'})
def test_options_valid_keywords3(self):
opts = Options('test', allowed_keywords=['kw1', 'kw2'], kw1='value1', kw2='value2')
self.assertEqual(opts.kwargs, {'kw1':'value1', 'kw2':'value2'})
def test_options_any_keywords3(self):
opts = Options('test', kw1='value1', kw2='value3')
self.assertEqual(opts.kwargs, {'kw1':'value1', 'kw2':'value3'})
def test_options_invalid_keywords1(self):
try:
Options('test', allowed_keywords=['kw1'], kw='value')
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw', valid options are: ['kw1']")
def test_options_invalid_keywords2(self):
try:
Options('test', allowed_keywords=['kw2'], kw2='value', kw3='value')
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw3', valid options are: ['kw2']")
def test_options_invalid_keywords_skip1(self):
with options_policy(skip_invalid=True, warn_on_skip=False):
opts = Options('test', allowed_keywords=['kw1'], kw='value')
self.assertEqual(opts.kwargs, {})
def test_options_invalid_keywords_skip2(self):
with options_policy(skip_invalid=True, warn_on_skip=False):
opts = Options('test', allowed_keywords=['kw1'], kw1='value', kw2='val')
self.assertEqual(opts.kwargs, {'kw1':'value'})
def test_options_record_invalid(self):
StoreOptions.start_recording_skipped()
with options_policy(skip_invalid=True, warn_on_skip=False):
Options('test', allowed_keywords=['kw1'], kw1='value', kw2='val')
errors = StoreOptions.stop_recording_skipped()
self.assertEqual(len(errors),1)
self.assertEqual(errors[0].invalid_keyword,'kw2')
def test_options_get_options(self):
opts = Options('test', allowed_keywords=['kw2', 'kw3'],
kw2='value', kw3='value').options
self.assertEqual(opts, dict(kw2='value', kw3='value'))
def test_options_get_options_cyclic1(self):
opts = Options('test', allowed_keywords=['kw2', 'kw3'],
kw2='value', kw3='value')
for i in range(16):
self.assertEqual(opts[i], dict(kw2='value', kw3='value'))
def test_options_keys(self):
opts = Options('test', allowed_keywords=['kw3', 'kw2'],
kw2='value', kw3='value')
self.assertEqual(opts.keys(), ['kw2', 'kw3'])
def test_options_inherit(self):
original_kws = dict(kw2='value', kw3='value')
opts = Options('test', **original_kws)
new_kws = dict(kw4='val4', kw5='val5')
new_opts = opts(**new_kws)
self.assertEqual(new_opts.options, dict(original_kws, **new_kws))
def test_options_inherit_invalid_keywords(self):
original_kws = dict(kw2='value', kw3='value')
opts = Options('test', allowed_keywords=['kw3', 'kw2'], **original_kws)
new_kws = dict(kw4='val4', kw5='val5')
try:
opts(**new_kws)
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw4', valid options are: ['kw2', 'kw3']")
class TestCycle(ComparisonTestCase):
def setUp(self):
self.original_option_groups = Options._option_groups
Options._option_groups = ['test']
super(TestCycle, self).setUp()
def tearDown(self):
Options._option_groups = self.original_option_groups
super(TestCycle, self).tearDown()
def test_cycle_init(self):
Cycle(values=['a', 'b', 'c'])
Cycle(values=[1, 2, 3])
def test_cycle_expansion(self):
cycle1 = Cycle(values=['a', 'b', 'c'])
cycle2 = Cycle(values=[1, 2, 3])
opts = Options('test', one=cycle1, two=cycle2)
self.assertEqual(opts[0], {'one': 'a', 'two': 1})
self.assertEqual(opts[1], {'one': 'b', 'two': 2})
self.assertEqual(opts[2], {'one': 'c', 'two': 3})
self.assertEqual(opts[3], {'one': 'a', 'two': 1})
self.assertEqual(opts[4], {'one': 'b', 'two': 2})
self.assertEqual(opts[5], {'one': 'c', 'two': 3})
def test_cycle_expansion_unequal(self):
cycle1 = Cycle(values=['a', 'b', 'c', 'd'])
cycle2 = Cycle(values=[1, 2, 3])
opts = Options('test', one=cycle1, two=cycle2)
self.assertEqual(opts[0], {'one': 'a', 'two': 1})
self.assertEqual(opts[1], {'one': 'b', 'two': 2})
self.assertEqual(opts[2], {'one': 'c', 'two': 3})
self.assertEqual(opts[3], {'one': 'd', 'two': 1})
self.assertEqual(opts[4], {'one': 'a', 'two': 2})
self.assertEqual(opts[5], {'one': 'b', 'two': 3})
def test_cycle_slice(self):
cycle1 = Cycle(values=['a', 'b', 'c'])[2]
cycle2 = Cycle(values=[1, 2, 3])
opts = Options('test', one=cycle1, two=cycle2)
self.assertEqual(opts[0], {'one': 'a', 'two': 1})
self.assertEqual(opts[1], {'one': 'b', 'two': 2})
self.assertEqual(opts[2], {'one': 'a', 'two': 3})
self.assertEqual(opts[3], {'one': 'b', 'two': 1})
def test_cyclic_property_true(self):
cycle1 = Cycle(values=['a', 'b', 'c'])
opts = Options('test', one=cycle1, two='two')
self.assertEqual(opts.cyclic, True)
def test_cyclic_property_false(self):
opts = Options('test', one='one', two='two')
self.assertEqual(opts.cyclic, False)
def test_options_property_disabled(self):
cycle1 = Cycle(values=['a', 'b', 'c'])
opts = Options('test', one=cycle1)
try:
opts.options
except Exception as e:
self.assertEqual(str(e), "The options property may only be used with non-cyclic Options.")
class TestOptionTree(ComparisonTestCase):
def setUp(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest('Matplotlib backend not available.')
super(TestOptionTree, self).setUp()
self.original_option_groups = Options._option_groups[:]
Options._option_groups = ['group1', 'group2']
def tearDown(self):
Options._option_groups = self.original_option_groups
super(TestOptionTree, self).tearDown()
def test_optiontree_init_1(self):
OptionTree(groups=['group1', 'group2'])
def test_optiontree_init_2(self):
OptionTree(groups=['group1', 'group2'])
def test_optiontree_setter_getter(self):
options = OptionTree(groups=['group1', 'group2'])
opts = Options('group1', kw1='value')
options.MyType = opts
self.assertEqual(options.MyType['group1'], opts)
self.assertEqual(options.MyType['group1'].options, {'kw1':'value'})
def test_optiontree_dict_setter_getter(self):
options = OptionTree(groups=['group1', 'group2'])
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
self.assertEqual(options.MyType['group1'], opts1)
self.assertEqual(options.MyType['group1'].options, {'kw1':'value1'})
self.assertEqual(options.MyType['group2'], opts2)
self.assertEqual(options.MyType['group2'].options, {'kw2':'value2'})
def test_optiontree_inheritance(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = OptionTree(groups=['group1', 'group2'])
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
opts3 = Options(kw3='value3')
opts4 = Options(kw4='value4')
options.MyType.Child = {'group1':opts3, 'group2':opts4}
self.assertEqual(options.MyType.Child.options('group1').kwargs,
{'kw1':'value1', 'kw3':'value3'})
self.assertEqual(options.MyType.Child.options('group2').kwargs,
{'kw2':'value2', 'kw4':'value4'})
def test_optiontree_inheritance_flipped(self):
"""
Tests for ordering problems manifested in issue #93
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = OptionTree(groups=['group1', 'group2'])
opts3 = Options(kw3='value3')
opts4 = Options(kw4='value4')
options.MyType.Child = {'group1':opts3, 'group2':opts4}
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
self.assertEqual(options.MyType.Child.options('group1').kwargs,
{'kw1':'value1', 'kw3':'value3'})
self.assertEqual(options.MyType.Child.options('group2').kwargs,
{'kw2':'value2', 'kw4':'value4'})
class TestStoreInheritanceDynamic(ComparisonTestCase):
"""
Tests to prevent regression after fix in PR #646
"""
def setUp(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest('Matplotlib backend not available.')
self.store_copy = OptionTree(sorted(Store.options().items()),
groups=Options._option_groups)
self.backend = 'matplotlib'
Store.current_backend = self.backend
super(TestStoreInheritanceDynamic, self).setUp()
def tearDown(self):
Store.options(val=self.store_copy)
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
super(TestStoreInheritanceDynamic, self).tearDown()
def initialize_option_tree(self):
Store.options(val=OptionTree(groups=['plot', 'style']))
options = Store.options()
options.Image = Options('style', cmap='hot', interpolation='nearest')
return options
def test_merge_keywords(self):
options = self.initialize_option_tree()
options.Image = Options('style', clims=(0, 0.5))
expected = {'clims': (0, 0.5), 'cmap': 'hot', 'interpolation': 'nearest'}
direct_kws = options.Image.groups['style'].kwargs
inherited_kws = options.Image.options('style').kwargs
self.assertEqual(direct_kws, expected)
self.assertEqual(inherited_kws, expected)
def test_merge_keywords_disabled(self):
options = self.initialize_option_tree()
options.Image = Options('style', clims=(0, 0.5), merge_keywords=False)
expected = {'clims': (0, 0.5)}
direct_kws = options.Image.groups['style'].kwargs
inherited_kws = options.Image.options('style').kwargs
self.assertEqual(direct_kws, expected)
self.assertEqual(inherited_kws, expected)
def test_specification_general_to_specific_group(self):
"""
Test order of specification starting with general and moving
to specific
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = self.initialize_option_tree()
obj = Image(np.random.rand(10,10), group='SomeGroup')
options.Image = Options('style', cmap='viridis')
options.Image.SomeGroup = Options('style', alpha=0.2)
expected = {'alpha': 0.2, 'cmap': 'viridis', 'interpolation': 'nearest'}
lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(lookup.kwargs, expected)
# Check the tree is structured as expected
node1 = options.Image.groups['style']
node2 = options.Image.SomeGroup.groups['style']
self.assertEqual(node1.kwargs, {'cmap': 'viridis', 'interpolation': 'nearest'})
self.assertEqual(node2.kwargs, {'alpha': 0.2})
def test_specification_general_to_specific_group_and_label(self):
"""
Test order of specification starting with general and moving
to specific
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = self.initialize_option_tree()
obj = Image(np.random.rand(10,10), group='SomeGroup', label='SomeLabel')
options.Image = Options('style', cmap='viridis')
options.Image.SomeGroup.SomeLabel = Options('style', alpha=0.2)
expected = {'alpha': 0.2, 'cmap': 'viridis', 'interpolation': 'nearest'}
lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(lookup.kwargs, expected)
# Check the tree is structured as expected
node1 = options.Image.groups['style']
node2 = options.Image.SomeGroup.SomeLabel.groups['style']
self.assertEqual(node1.kwargs, {'cmap': 'viridis', 'interpolation': 'nearest'})
self.assertEqual(node2.kwargs, {'alpha': 0.2})
def test_specification_specific_to_general_group(self):
"""
Test order of specification starting with a specific option and
then specifying a general one
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = self.initialize_option_tree()
options.Image.SomeGroup = Options('style', alpha=0.2)
obj = Image(np.random.rand(10,10), group='SomeGroup')
options.Image = Options('style', cmap='viridis')
expected = {'alpha': 0.2, 'cmap': 'viridis', 'interpolation': 'nearest'}
lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(lookup.kwargs, expected)
# Check the tree is structured as expected
node1 = options.Image.groups['style']
node2 = options.Image.SomeGroup.groups['style']
self.assertEqual(node1.kwargs, {'cmap': 'viridis', 'interpolation': 'nearest'})
self.assertEqual(node2.kwargs, {'alpha': 0.2})
def test_specification_specific_to_general_group_and_label(self):
"""
Test order of specification starting with a specific option and
then specifying a general one
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("General to specific option test requires matplotlib")
options = self.initialize_option_tree()
options.Image.SomeGroup.SomeLabel = Options('style', alpha=0.2)
obj = Image(np.random.rand(10,10), group='SomeGroup', label='SomeLabel')
options.Image = Options('style', cmap='viridis')
expected = {'alpha': 0.2, 'cmap': 'viridis', 'interpolation': 'nearest'}
lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(lookup.kwargs, expected)
# Check the tree is structured as expected
node1 = options.Image.groups['style']
node2 = options.Image.SomeGroup.SomeLabel.groups['style']
self.assertEqual(node1.kwargs, {'cmap': 'viridis', 'interpolation': 'nearest'})
self.assertEqual(node2.kwargs, {'alpha': 0.2})
def test_custom_opts_to_default_inheritance(self):
"""
Checks customs inheritance backs off to default tree correctly
using .opts.
"""
options = self.initialize_option_tree()
options.Image.A.B = Options('style', alpha=0.2)
obj = Image(np.random.rand(10, 10), group='A', label='B')
expected_obj = {'alpha': 0.2, 'cmap': 'hot', 'interpolation': 'nearest'}
obj_lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(obj_lookup.kwargs, expected_obj)
# Customize this particular object
custom_obj = obj.opts(style=dict(clims=(0, 0.5)))
expected_custom_obj = dict(clims=(0,0.5), **expected_obj)
custom_obj_lookup = Store.lookup_options('matplotlib', custom_obj, 'style')
self.assertEqual(custom_obj_lookup.kwargs, expected_custom_obj)
def test_custom_magic_to_default_inheritance(self):
"""
Checks customs inheritance backs off to default tree correctly
simulating the %%opts cell magic.
"""
if 'matplotlib' not in Store.renderers:
raise SkipTest("Custom magic inheritance test requires matplotlib")
options = self.initialize_option_tree()
options.Image.A.B = Options('style', alpha=0.2)
obj = Image(np.random.rand(10, 10), group='A', label='B')
# Before customizing...
expected_obj = {'alpha': 0.2, 'cmap': 'hot', 'interpolation': 'nearest'}
obj_lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(obj_lookup.kwargs, expected_obj)
custom_tree = {0: OptionTree(groups=Options._option_groups,
style={'Image' : dict(clims=(0, 0.5))})}
Store._custom_options['matplotlib'] = custom_tree
obj.id = 0 # Manually set the id to point to the tree above
# Customize this particular object
expected_custom_obj = dict(clims=(0,0.5), **expected_obj)
custom_obj_lookup = Store.lookup_options('matplotlib', obj, 'style')
self.assertEqual(custom_obj_lookup.kwargs, expected_custom_obj)
class TestStoreInheritance(ComparisonTestCase):
"""
Tests to prevent regression after fix in 71c1f3a that resolves
issue #43
"""
def setUp(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest('Matplotlib backend not available.')
self.store_copy = OptionTree(sorted(Store.options().items()),
groups=Options._option_groups)
self.backend = 'matplotlib'
Store.current_backend = self.backend
Store.options(val=OptionTree(groups=['plot', 'style']))
options = Store.options()
self.default_plot = dict(plot1='plot1', plot2='plot2')
options.Histogram = Options('plot', **self.default_plot)
self.default_style = dict(style1='style1', style2='style2')
options.Histogram = Options('style', **self.default_style)
data = [np.random.normal() for i in range(10000)]
frequencies, edges = np.histogram(data, 20)
self.hist = Histogram(frequencies, edges)
super(TestStoreInheritance, self).setUp()
def lookup_options(self, obj, group):
return Store.lookup_options(self.backend, obj, group)
def tearDown(self):
Store.options(val=self.store_copy)
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
super(TestStoreInheritance, self).tearDown()
def test_original_style_options(self):
self.assertEqual(self.lookup_options(self.hist, 'style').options,
self.default_style)
def test_original_plot_options(self):
self.assertEqual(self.lookup_options(self.hist, 'plot').options,
self.default_plot)
def test_plot_inheritance_addition(self):
"Adding an element"
hist2 = self.hist.opts(plot={'plot3':'plot3'})
self.assertEqual(self.lookup_options(hist2, 'plot').options,
dict(plot1='plot1', plot2='plot2', plot3='plot3'))
# Check style works as expected
self.assertEqual(self.lookup_options(hist2, 'style').options, self.default_style)
def test_plot_inheritance_override(self):
"Overriding an element"
hist2 = self.hist.opts(plot={'plot1':'plot_child'})
self.assertEqual(self.lookup_options(hist2, 'plot').options,
dict(plot1='plot_child', plot2='plot2'))
# Check style works as expected
self.assertEqual(self.lookup_options(hist2, 'style').options, self.default_style)
def test_style_inheritance_addition(self):
"Adding an element"
hist2 = self.hist.opts(style={'style3':'style3'})
self.assertEqual(self.lookup_options(hist2, 'style').options,
dict(style1='style1', style2='style2', style3='style3'))
# Check plot options works as expected
self.assertEqual(self.lookup_options(hist2, 'plot').options, self.default_plot)
def test_style_inheritance_override(self):
"Overriding an element"
hist2 = self.hist.opts(style={'style1':'style_child'})
self.assertEqual(self.lookup_options(hist2, 'style').options,
dict(style1='style_child', style2='style2'))
# Check plot options works as expected
self.assertEqual(self.lookup_options(hist2, 'plot').options, self.default_plot)
def test_style_transfer(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest("test_style_transfer requires matplotlib")
hist = self.hist.opts(style={'style1':'style_child'})
hist2 = self.hist.opts()
opts = Store.lookup_options('matplotlib', hist2, 'style').kwargs
self.assertEqual(opts, {'style1': 'style1', 'style2': 'style2'})
Store.transfer_options(hist, hist2, 'matplotlib')
opts = Store.lookup_options('matplotlib', hist2, 'style').kwargs
self.assertEqual(opts, {'style1': 'style_child', 'style2': 'style2'})
class TestOptionsMethod(ComparisonTestCase):
def setUp(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest('Matplotlib backend not available.')
self.store_copy = OptionTree(sorted(Store.options().items()),
groups=Options._option_groups)
self.backend = 'matplotlib'
Store.set_current_backend(self.backend)
super(TestOptionsMethod, self).setUp()
def lookup_options(self, obj, group):
return Store.lookup_options(self.backend, obj, group)
def tearDown(self):
Store.options(val=self.store_copy)
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
super(TestOptionsMethod, self).tearDown()
def test_plot_options_keywords(self):
im = Image(np.random.rand(10,10))
styled_im = im.options(interpolation='nearest', cmap='jet')
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'style').options,
dict(cmap='jet', interpolation='nearest'))
def test_plot_options_one_object(self):
im = Image(np.random.rand(10,10))
imopts = opts.Image(interpolation='nearest', cmap='jet')
styled_im = im.options(imopts)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'style').options,
dict(cmap='jet', interpolation='nearest'))
def test_plot_options_two_object(self):
im = Image(np.random.rand(10,10))
imopts1 = opts.Image(interpolation='nearest')
imopts2 = opts.Image(cmap='hsv')
styled_im = im.options(imopts1,imopts2)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'style').options,
dict(cmap='hsv', interpolation='nearest'))
def test_plot_options_object_list(self):
im = Image(np.random.rand(10,10))
imopts1 = opts.Image(interpolation='nearest')
imopts2 = opts.Image(cmap='summer')
styled_im = im.options([imopts1,imopts2])
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'style').options,
dict(cmap='summer', interpolation='nearest'))
class TestOptsMethod(ComparisonTestCase):
def setUp(self):
if 'matplotlib' not in Store.renderers:
raise SkipTest('Matplotlib backend not available.')
self.store_copy = OptionTree(sorted(Store.options().items()),
groups=Options._option_groups)
self.backend = 'matplotlib'
Store.set_current_backend(self.backend)
super(TestOptsMethod, self).setUp()
def lookup_options(self, obj, group):
return Store.lookup_options(self.backend, obj, group)
def tearDown(self):
Store.options(val=self.store_copy)
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
super(TestOptsMethod, self).tearDown()
def test_old_opts_clone_disabled(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(style=dict(interpolation='nearest', cmap='jet'), clone=False)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'plot').options, {})
assert styled_im is im
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'jet', 'interpolation': 'nearest'})
def test_old_opts_clone_enabled(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(style=dict(interpolation='nearest', cmap='jet'), clone=True)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'plot').options, {})
assert styled_im is not im
im_lookup = self.lookup_options(im, 'style').options
self.assertEqual(im_lookup['cmap'] == 'jet', False)
styled_im_lookup = self.lookup_options(styled_im, 'style').options
self.assertEqual(styled_im_lookup['cmap'] == 'jet', True)
def test_simple_clone_disabled(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(interpolation='nearest', cmap='jet', clone=False)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'plot').options, {})
assert styled_im is im
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'jet', 'interpolation': 'nearest'})
def test_simple_opts_clone_enabled(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(interpolation='nearest', cmap='jet', clone=True)
self.assertEqual(self.lookup_options(im, 'plot').options, {})
self.assertEqual(self.lookup_options(styled_im, 'plot').options, {})
assert styled_im is not im
im_lookup = self.lookup_options(im, 'style').options
self.assertEqual(im_lookup['cmap'] == 'jet', False)
styled_im_lookup = self.lookup_options(styled_im, 'style').options
self.assertEqual(styled_im_lookup['cmap'] == 'jet', True)
def test_opts_method_with_utility(self):
im = Image(np.random.rand(10,10))
imopts = opts.Image(cmap='Blues')
styled_im = im.opts(imopts)
assert styled_im is im
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'Blues', 'interpolation': 'nearest'})
def test_opts_method_dynamicmap_grouped(self):
dmap = DynamicMap(lambda X: Curve([1, 2, X]),
kdims=['X']).redim.range(X=(0, 3))
retval = dmap.opts({'plot': dict(width=700)})
assert retval is not dmap
self.assertEqual(self.lookup_options(retval[0], 'plot').options,
{'width':700})
def test_opts_clear(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(style=dict(cmap='jet', interpolation='nearest',
option1='A', option2='B'), clone=False)
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'jet', 'interpolation': 'nearest',
'option1':'A', 'option2':'B'})
assert styled_im is im
cleared = im.opts.clear()
assert cleared is im
cleared_options = self.lookup_options(cleared, 'style').options
self.assertEqual(not any(k in ['option1', 'option2']
for k in cleared_options.keys()), True)
def test_opts_clear_clone(self):
im = Image(np.random.rand(10,10))
styled_im = im.opts(style=dict(cmap='jet', interpolation='nearest',
option1='A', option2='B'), clone=False)
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'jet', 'interpolation': 'nearest',
'option1':'A', 'option2':'B'})
assert styled_im is im
cleared = im.opts.clear(clone=True)
assert cleared is not im
self.assertEqual(self.lookup_options(im, 'style').options,
{'cmap': 'jet', 'interpolation': 'nearest',
'option1':'A', 'option2':'B'})
cleared_options = self.lookup_options(cleared, 'style').options
self.assertEqual(not any(k in ['option1', 'option2']
for k in cleared_options.keys()), True)
class TestOptionTreeFind(ComparisonTestCase):
def setUp(self):
self.original_option_groups = Options._option_groups[:]
Options._option_groups = ['group']
options = OptionTree(groups=['group'])
self.opts1 = Options('group', kw1='value1')
self.opts2 = Options('group', kw2='value2')
self.opts3 = Options('group', kw3='value3')
self.opts4 = Options('group', kw4='value4')
self.opts5 = Options('group', kw5='value5')
self.opts6 = Options('group', kw6='value6')
options.MyType = self.opts1
options.XType = self.opts2
options.MyType.Foo = self.opts3
options.MyType.Bar = self.opts4
options.XType.Foo = self.opts5
options.XType.Bar = self.opts6
self.options = options
self.original_options = Store.options()
Store.options(val = OptionTree(groups=['group']))
def tearDown(self):
Options._option_groups = self.original_option_groups
Store.options(val=self.original_options)
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
def test_optiontree_find1(self):
self.assertEqual(self.options.find('MyType').options('group').options,
dict(kw1='value1'))
def test_optiontree_find2(self):
self.assertEqual(self.options.find('XType').options('group').options,
dict(kw2='value2'))
def test_optiontree_find3(self):
self.assertEqual(self.options.find('MyType.Foo').options('group').options,
dict(kw1='value1', kw3='value3'))
def test_optiontree_find4(self):
self.assertEqual(self.options.find('MyType.Bar').options('group').options,
dict(kw1='value1', kw4='value4'))
def test_optiontree_find5(self):
self.assertEqual(self.options.find('XType.Foo').options('group').options,
dict(kw2='value2', kw5='value5'))
def test_optiontree_find6(self):
self.assertEqual(self.options.find('XType.Bar').options('group').options,
dict(kw2='value2', kw6='value6'))
def test_optiontree_find_mismatch1(self):
self.assertEqual(self.options.find('MyType.Baz').options('group').options,
dict(kw1='value1'))
def test_optiontree_find_mismatch2(self):
self.assertEqual(self.options.find('XType.Baz').options('group').options,
dict(kw2='value2'))
def test_optiontree_find_mismatch3(self):
self.assertEqual(self.options.find('Baz').options('group').options, dict())
def test_optiontree_find_mismatch4(self):
self.assertEqual(self.options.find('Baz.Baz').options('group').options, dict())
class TestCrossBackendOptions(ComparisonTestCase):
"""
Test the style system can style a single object across backends.
"""
def setUp(self):
        if 'matplotlib' not in Store.renderers:
            raise SkipTest("Cross-backend tests assume matplotlib is available.")
        if 'bokeh' not in Store.renderers:
            raise SkipTest("Cross-backend tests assume bokeh is available.")
# Some tests require that plotly isn't loaded
self.plotly_options = Store._options.pop('plotly', None)
self.store_mpl = OptionTree(sorted(Store.options(backend='matplotlib').items()),
groups=Options._option_groups)
self.store_bokeh = OptionTree(sorted(Store.options(backend='bokeh').items()),
groups=Options._option_groups)
self.clear_options()
super(TestCrossBackendOptions, self).setUp()
def clear_options(self):
# Clear global options..
Store.options(val=OptionTree(groups=['plot', 'style']), backend='matplotlib')
Store.options(val=OptionTree(groups=['plot', 'style']), backend='bokeh')
# ... and custom options
Store.custom_options({}, backend='matplotlib')
Store.custom_options({}, backend='bokeh')
def tearDown(self):
Store.options(val=self.store_mpl, backend='matplotlib')
Store.options(val=self.store_bokeh, backend='bokeh')
Store.current_backend = 'matplotlib'
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
if self.plotly_options is not None:
Store._options['plotly'] = self.plotly_options
super(TestCrossBackendOptions, self).tearDown()
def test_mpl_bokeh_mpl(self):
img = Image(np.random.rand(10,10))
# Use blue in matplotlib
Store.current_backend = 'matplotlib'
StoreOptions.set_options(img, style={'Image':{'cmap':'Blues'}})
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# Use purple in bokeh
Store.current_backend = 'bokeh'
StoreOptions.set_options(img, style={'Image':{'cmap':'Purple'}})
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
return img
def test_mpl_bokeh_offset_mpl(self):
img = Image(np.random.rand(10,10))
# Use blue in matplotlib
Store.current_backend = 'matplotlib'
StoreOptions.set_options(img, style={'Image':{'cmap':'Blues'}})
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# Switch to bokeh and style a random object...
Store.current_backend = 'bokeh'
img2 = Image(np.random.rand(10,10))
StoreOptions.set_options(img2, style={'Image':{'cmap':'Reds'}})
img2_opts = Store.lookup_options('bokeh', img2, 'style').options
self.assertEqual(img2_opts, {'cmap':'Reds'})
# Use purple in bokeh on the object...
StoreOptions.set_options(img, style={'Image':{'cmap':'Purple'}})
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
return img
def test_builder_backend_switch(self):
Store.options(val=self.store_mpl, backend='matplotlib')
Store.options(val=self.store_bokeh, backend='bokeh')
Store.set_current_backend('bokeh')
self.assertEqual(opts.Curve.__doc__.startswith('Curve('), True)
docline = opts.Curve.__doc__.splitlines()[0]
dockeys = eval(docline.replace('Curve', 'dict'))
self.assertEqual('color' in dockeys, True)
self.assertEqual('line_width' in dockeys, True)
Store.set_current_backend('matplotlib')
self.assertEqual(opts.Curve.__doc__.startswith('Curve('), True)
docline = opts.Curve.__doc__.splitlines()[0]
dockeys = eval(docline.replace('Curve', 'dict'))
self.assertEqual('color' in dockeys, True)
self.assertEqual('linewidth' in dockeys, True)
def test_builder_cross_backend_validation(self):
Store.options(val=self.store_mpl, backend='matplotlib')
Store.options(val=self.store_bokeh, backend='bokeh')
Store.set_current_backend('bokeh')
opts.Curve(line_dash='dotted') # Bokeh keyword
opts.Curve(linewidth=10) # MPL keyword
err = ("In opts.Curve\(...\), keywords supplied are mixed across backends. "
"Keyword\(s\) 'linewidth' are invalid for bokeh, "
"'line_dash' are invalid for matplotlib")
with self.assertRaisesRegexp(ValueError, err):
opts.Curve(linewidth=10, line_dash='dotted') # Bokeh and MPL
# Non-existent keyword across backends (bokeh active)
err = ("In opts.Curve\(...\), unexpected option 'foobar' for Curve type "
"across all extensions. Similar options for current "
"extension \('bokeh'\) are: \['toolbar'\].")
with self.assertRaisesRegexp(ValueError, err):
opts.Curve(foobar=3)
# Non-existent keyword across backends (matplotlib active)
Store.set_current_backend('matplotlib')
err = ("In opts.Curve\(...\), unexpected option 'foobar' for Curve "
"type across all extensions. No similar options found.")
with self.assertRaisesRegexp(ValueError, err):
opts.Curve(foobar=3)
class TestCrossBackendOptionSpecification(ComparisonTestCase):
"""
Test the style system can style a single object across backends.
"""
def setUp(self):
        if 'matplotlib' not in Store.renderers:
            raise SkipTest("Cross-backend tests assume matplotlib is available.")
        if 'bokeh' not in Store.renderers:
            raise SkipTest("Cross-backend tests assume bokeh is available.")
# Some tests require that plotly isn't loaded
self.plotly_options = Store._options.pop('plotly', None)
self.store_mpl = OptionTree(sorted(Store.options(backend='matplotlib').items()),
groups=Options._option_groups)
self.store_bokeh = OptionTree(sorted(Store.options(backend='bokeh').items()),
groups=Options._option_groups)
super(TestCrossBackendOptionSpecification, self).setUp()
def tearDown(self):
Store.options(val=self.store_mpl, backend='matplotlib')
Store.options(val=self.store_bokeh, backend='bokeh')
Store.current_backend = 'matplotlib'
Store._custom_options = {k:{} for k in Store._custom_options.keys()}
if self.plotly_options is not None:
Store._options['plotly'] = self.plotly_options
super(TestCrossBackendOptionSpecification, self).tearDown()
def assert_output_options_group_empty(self, obj):
mpl_output_lookup = Store.lookup_options('matplotlib', obj, 'output').options
self.assertEqual(mpl_output_lookup, {})
bokeh_output_lookup = Store.lookup_options('bokeh', obj, 'output').options
self.assertEqual(bokeh_output_lookup, {})
def test_mpl_bokeh_mpl_via_option_objects_opts_method(self):
img = Image(np.random.rand(10,10))
mpl_opts = Options('Image', cmap='Blues', backend='matplotlib')
bokeh_opts = Options('Image', cmap='Purple', backend='bokeh')
self.assertEqual(mpl_opts.kwargs['backend'], 'matplotlib')
self.assertEqual(bokeh_opts.kwargs['backend'], 'bokeh')
img.opts(mpl_opts, bokeh_opts)
mpl_lookup = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_lookup['cmap'], 'Blues')
bokeh_lookup = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_lookup['cmap'], 'Purple')
self.assert_output_options_group_empty(img)
def test_mpl_bokeh_mpl_via_builders_opts_method(self):
img = Image(np.random.rand(10,10))
mpl_opts = opts.Image(cmap='Blues', backend='matplotlib')
bokeh_opts = opts.Image(cmap='Purple', backend='bokeh')
self.assertEqual(mpl_opts.kwargs['backend'], 'matplotlib')
self.assertEqual(bokeh_opts.kwargs['backend'], 'bokeh')
img.opts(mpl_opts, bokeh_opts)
mpl_lookup = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_lookup['cmap'], 'Blues')
bokeh_lookup = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_lookup['cmap'], 'Purple')
self.assert_output_options_group_empty(img)
def test_mpl_bokeh_mpl_via_dict_backend_keyword(self):
curve = Curve([1,2,3])
styled_mpl = curve.opts({'Curve': dict(color='red')}, backend='matplotlib')
styled = styled_mpl.opts({'Curve': dict(color='green')}, backend='bokeh')
mpl_lookup = Store.lookup_options('matplotlib', styled, 'style')
self.assertEqual(mpl_lookup.kwargs['color'], 'red')
bokeh_lookup = Store.lookup_options('bokeh', styled, 'style')
self.assertEqual(bokeh_lookup.kwargs['color'], 'green')
def test_mpl_bokeh_mpl_via_builders_opts_method_implicit_backend(self):
img = Image(np.random.rand(10,10))
Store.set_current_backend('matplotlib')
mpl_opts = opts.Image(cmap='Blues')
bokeh_opts = opts.Image(cmap='Purple', backend='bokeh')
self.assertEqual('backend' not in mpl_opts.kwargs, True)
self.assertEqual(bokeh_opts.kwargs['backend'], 'bokeh')
img.opts(mpl_opts, bokeh_opts)
mpl_lookup = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_lookup['cmap'], 'Blues')
bokeh_lookup = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_lookup['cmap'], 'Purple')
self.assert_output_options_group_empty(img)
def test_mpl_bokeh_mpl_via_builders_opts_method_literal_implicit_backend(self):
img = Image(np.random.rand(10,10))
curve = Curve([1,2,3])
overlay = img * curve
Store.set_current_backend('matplotlib')
literal = {'Curve':
{'style':dict(color='orange')},
'Image':
{'style':dict(cmap='jet'), 'output':dict(backend='bokeh')}
}
styled = overlay.opts(literal)
mpl_curve_lookup = Store.lookup_options('matplotlib', styled.Curve.I, 'style')
self.assertEqual(mpl_curve_lookup.kwargs['color'], 'orange')
mpl_img_lookup = Store.lookup_options('matplotlib', styled.Image.I, 'style')
self.assertNotEqual(mpl_img_lookup.kwargs['cmap'], 'jet')
bokeh_curve_lookup = Store.lookup_options('bokeh', styled.Curve.I, 'style')
self.assertNotEqual(bokeh_curve_lookup.kwargs['color'], 'orange')
bokeh_img_lookup = Store.lookup_options('bokeh', styled.Image.I, 'style')
self.assertEqual(bokeh_img_lookup.kwargs['cmap'], 'jet')
def test_mpl_bokeh_mpl_via_builders_opts_method_literal_explicit_backend(self):
img = Image(np.random.rand(10,10))
curve = Curve([1,2,3])
overlay = img * curve
Store.set_current_backend('matplotlib')
literal = {'Curve':
{'style':dict(color='orange'), 'output':dict(backend='matplotlib')},
'Image':
{'style':dict(cmap='jet'), 'output':dict(backend='bokeh')}
}
styled = overlay.opts(literal)
mpl_curve_lookup = Store.lookup_options('matplotlib', styled.Curve.I, 'style')
self.assertEqual(mpl_curve_lookup.kwargs['color'], 'orange')
mpl_img_lookup = Store.lookup_options('matplotlib', styled.Image.I, 'style')
self.assertNotEqual(mpl_img_lookup.kwargs['cmap'], 'jet')
bokeh_curve_lookup = Store.lookup_options('bokeh', styled.Curve.I, 'style')
self.assertNotEqual(bokeh_curve_lookup.kwargs['color'], 'orange')
bokeh_img_lookup = Store.lookup_options('bokeh', styled.Image.I, 'style')
self.assertEqual(bokeh_img_lookup.kwargs['cmap'], 'jet')
def test_mpl_bokeh_mpl_via_builders_opts_method_flat_literal_explicit_backend(self):
img = Image(np.random.rand(10,10))
curve = Curve([1,2,3])
overlay = img * curve
Store.set_current_backend('matplotlib')
literal = {'Curve': dict(color='orange', backend='matplotlib'),
'Image': dict(cmap='jet', backend='bokeh')
}
styled = overlay.opts(literal)
mpl_curve_lookup = Store.lookup_options('matplotlib', styled.Curve.I, 'style')
self.assertEqual(mpl_curve_lookup.kwargs['color'], 'orange')
mpl_img_lookup = Store.lookup_options('matplotlib', styled.Image.I, 'style')
self.assertNotEqual(mpl_img_lookup.kwargs['cmap'], 'jet')
bokeh_curve_lookup = Store.lookup_options('bokeh', styled.Curve.I, 'style')
self.assertNotEqual(bokeh_curve_lookup.kwargs['color'], 'orange')
bokeh_img_lookup = Store.lookup_options('bokeh', styled.Image.I, 'style')
self.assertEqual(bokeh_img_lookup.kwargs['cmap'], 'jet')
def test_mpl_bokeh_output_options_group_expandable(self):
original_allowed_kws = Options._output_allowed_kws[:]
Options._output_allowed_kws = ['backend', 'file_format_example']
# Re-register
Store.register({Curve: plotting.mpl.CurvePlot}, 'matplotlib')
Store.register({Curve: plotting.bokeh.CurvePlot}, 'bokeh')
curve_bk = Options('Curve', backend='bokeh', color='blue')
curve_mpl = Options('Curve', backend='matplotlib', color='red',
file_format_example='SVG')
c = Curve([1,2,3])
styled = c.opts(curve_bk, curve_mpl)
self.assertEqual(Store.lookup_options('matplotlib', styled, 'output').kwargs,
{'backend':'matplotlib', 'file_format_example':'SVG'})
self.assertEqual(Store.lookup_options('bokeh', styled, 'output').kwargs,
{})
Options._output_allowed_kws = original_allowed_kws
class TestCrossBackendOptionPickling(TestCrossBackendOptions):
cleanup = ['test_raw_pickle.pkl', 'test_pickle_mpl_bokeh.pkl']
def tearDown(self):
super(TestCrossBackendOptionPickling, self).tearDown()
for f in self.cleanup:
try:
os.remove(f)
except:
pass
def test_raw_pickle(self):
"""
Test usual pickle saving and loading (no style information preserved)
"""
fname= 'test_raw_pickle.pkl'
raw = super(TestCrossBackendOptionPickling, self).test_mpl_bokeh_mpl()
pickle.dump(raw, open(fname,'wb'))
self.clear_options()
img = pickle.load(open(fname,'rb'))
# Data should match
self.assertEqual(raw, img)
# But the styles will be lost without using Store.load/Store.dump
        Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {})
# ... across all backends
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {})
def test_pickle_mpl_bokeh(self):
"""
Test pickle saving and loading with Store (style information preserved)
"""
fname = 'test_pickle_mpl_bokeh.pkl'
raw = super(TestCrossBackendOptionPickling, self).test_mpl_bokeh_mpl()
Store.dump(raw, open(fname,'wb'))
self.clear_options()
img = Store.load(open(fname,'rb'))
# Data should match
self.assertEqual(raw, img)
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
| bsd-3-clause |
hpssjellis/easy-tensorflow-on-cloud9 | aymericdamien-Examples/examples/linear_regression.py | 7 | 2600 | '''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.mul(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
print "Testing... (L2 loss Comparison)"
testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) #same function as cost above
print "Testing cost=", testing_cost
print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show() | mit |
thisisev/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chose to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
kambysese/mne-python | mne/fixes.py | 4 | 37120 | """Compatibility fixes for older versions of libraries
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from distutils.version import LooseVersion
import functools
import inspect
from math import log
import os
from pathlib import Path
import warnings
import numpy as np
###############################################################################
# Misc
def _median_complex(data, axis):
"""Compute marginal median on complex data safely.
XXX: Can be removed when numpy introduces a fix.
See: https://github.com/scipy/scipy/pull/12676/.
"""
# np.median must be passed real arrays for the desired result
if np.iscomplexobj(data):
data = (np.median(np.real(data), axis=axis)
+ 1j * np.median(np.imag(data), axis=axis))
else:
data = np.median(data, axis=axis)
return data
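# Minimal usage sketch (illustrative only): the real and imaginary parts are
# reduced independently, so the "median" of [1+4j, 2+6j, 3+5j] comes out as 2+5j.
def _example_median_complex():
    data = np.array([1 + 4j, 2 + 6j, 3 + 5j])
    return _median_complex(data, axis=0)  # (2+5j)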
# helpers to get function arguments
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
from scipy import linalg
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
def _csc_matrix_cast(x):
from scipy.sparse import csc_matrix
return csc_matrix(x)
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel:
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int64)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
###############################################################################
# Triaging FFT functions to get fast pocketfft (SciPy 1.4)
@functools.lru_cache(None)
def _import_fft(name):
single = False
if not isinstance(name, tuple):
name = (name,)
single = True
try:
from scipy.fft import rfft # noqa analysis:ignore
except ImportError:
from numpy import fft # noqa
else:
from scipy import fft # noqa
out = [getattr(fft, n) for n in name]
if single:
out = out[0]
return out
###############################################################################
# NumPy Generator (NumPy 1.17)
def rng_uniform(rng):
"""Get the unform/randint from the rng."""
# prefer Generator.integers, fall back to RandomState.randint
return getattr(rng, 'integers', getattr(rng, 'randint', None))
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
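# Minimal usage sketch (assumes SciPy is installed): second-order sections
# produced with ``output='sos'`` already satisfy the layout checked above,
# i.e. shape (n_sections, 6) with the a0 coefficient (column 3) equal to 1.
def _example_validate_sos():
    from scipy.signal import butter
    sos = butter(4, 0.125, output='sos')  # 4th-order lowpass Butterworth
    sos, n_sections = _validate_sos(sos)
    return n_sections  # 2 second-order sections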
###############################################################################
# Misc utilities
# get_fdata() requires knowing the dtype ahead of time, so let's triage on our
# own instead
def _get_img_fdata(img):
data = np.asanyarray(img.dataobj)
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
return data.astype(dtype)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]): # Read two bytes more
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{} = {} {} {}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'_xfail_checks': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True,
'preserves_dtype': [np.float64],
'requires_y': False,
'pairwise': False,
}
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : bool, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Parameters
----------
**params : dict
Parameters.
Returns
-------
inst : instance
The object.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
# ``if type(self).__module__.startswith('sklearn.')``.
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
# newer sklearn deprecates importing from sklearn.metrics.scoring,
# but older sklearn does not expose check_scoring in sklearn.metrics.
def _get_check_scoring():
try:
from sklearn.metrics import check_scoring # noqa
except ImportError:
from sklearn.metrics.scorer import check_scoring # noqa
return check_scoring
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as
`X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support
indexing.
"""
try:
from sklearn.utils.validation import \
_check_fit_params as _sklearn_check_fit_params
return _sklearn_check_fit_params(X, fit_params, indices)
except ImportError:
from sklearn.model_selection import _validation
fit_params_validated = \
{k: _validation._index_param_value(X, v, indices)
for k, v in fit_params.items()}
return fit_params_validated
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
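# Minimal usage sketch (illustrative only): with ``assume_centered=False`` the
# estimate equals the biased sample covariance, i.e. ``np.cov(X.T, bias=1)``.
def _example_empirical_covariance():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    cov = empirical_covariance(X)
    assert np.allclose(cov, np.cov(X.T, bias=1))
    return cov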
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
from scipy import linalg
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
from scipy import linalg
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fit the Maximum Likelihood Estimator covariance model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : ndarray | None
Not used, present for API consistency.
Returns
-------
self : object
Returns self.
""" # noqa: E501
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian dataset.
Uses ``self.covariance_`` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y : ndarray | None
Not used, present for API consistency.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
from scipy import linalg
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
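# Minimal usage sketch (illustrative only): with an identity empirical covariance
# and identity precision, the expression above reduces to -(p + p*log(2*pi)) / 2
# for p features, which makes a convenient sanity check.
def _example_log_likelihood():
    p = 3
    expected = -(p + p * np.log(2 * np.pi)) / 2.
    assert np.isclose(log_likelihood(np.eye(p), np.eye(p)), expected)
    return expected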
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
from scipy import linalg
vals = linalg.eigvalsh(A)
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
def _assess_dimension_(spectrum, rank, n_samples, n_features):
from scipy.special import gammaln
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
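# Minimal usage sketch (illustrative only): after flipping, the largest-magnitude
# entry of each column of ``u`` is non-negative, which makes the SVD output
# deterministic up to this sign convention.
def _example_svd_flip():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 3)
    u, s, vt = np.linalg.svd(A, full_matrices=False)
    u, vt = svd_flip(u, vt)
    max_rows = np.argmax(np.abs(u), axis=0)
    assert np.all(u[max_rows, np.arange(u.shape[1])] >= 0)
    return u, s, vt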
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug)
def svd(a, hermitian=False):
if hermitian: # faster
s, u = np.linalg.eigh(a)
sgn = np.sign(s)
s = np.abs(s)
sidx = np.argsort(s)[..., ::-1]
sgn = take_along_axis(sgn, sidx, axis=-1)
s = take_along_axis(s, sidx, axis=-1)
u = take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj()
np.abs(s, out=s)
return u, s, vt
else:
return np.linalg.svd(a)
###############################################################################
# NumPy einsum backward compat (allow "optimize" arg and fix 1.14.0 bug)
# XXX eventually we should hand-tune our `einsum` calls given our array sizes!
def einsum(*args, **kwargs):
if 'optimize' not in kwargs:
kwargs['optimize'] = False
return np.einsum(*args, **kwargs)
try:
from numpy import take_along_axis
except ImportError: # NumPy < 1.15
def take_along_axis(arr, indices, axis):
# normalize inputs
if axis is None:
arr = arr.flat
arr_shape = (len(arr),) # flatiter has no .shape
axis = 0
else:
# there is a NumPy function for this, but rather than copy our
# internal uses should be correct, so just normalize quickly
if axis < 0:
axis += arr.ndim
assert 0 <= axis < arr.ndim
arr_shape = arr.shape
# use the fancy index
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not np.issubdtype(indices.dtype, np.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions")
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr_shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
fancy_index.append(np.arange(n).reshape(ind_shape))
return tuple(fancy_index)
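# Minimal usage sketch (illustrative only): picking one entry per row, e.g. the
# per-row maximum of a 2D array, works the same with the NumPy function or the
# backport above.
def _example_take_along_axis():
    x = np.array([[10, 30, 20], [60, 40, 50]])
    idx = np.argmax(x, axis=1)[:, np.newaxis]
    return take_along_axis(x, idx, axis=1)  # [[30], [60]]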
###############################################################################
# From nilearn
def _crop_colorbar(cbar, cbar_vmin, cbar_vmax):
"""
    Crop a colorbar so that it only shows the range from cbar_vmin to cbar_vmax.
    Used when symmetric_cbar=False.
"""
import matplotlib
if (cbar_vmin is None) and (cbar_vmax is None):
return
cbar_tick_locs = cbar.locator.locs
if cbar_vmax is None:
cbar_vmax = cbar_tick_locs.max()
if cbar_vmin is None:
cbar_vmin = cbar_tick_locs.min()
new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,
len(cbar_tick_locs))
# matplotlib >= 3.2.0 no longer normalizes axes between 0 and 1
# See https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html
# _outline was removed in
# https://github.com/matplotlib/matplotlib/commit/03a542e875eba091a027046d5ec652daa8be6863
# so we use the code from there
if LooseVersion(matplotlib.__version__) >= LooseVersion("3.2.0"):
cbar.ax.set_ylim(cbar_vmin, cbar_vmax)
X, _ = cbar._mesh()
X = np.array([X[0], X[-1]])
Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]])
N = X.shape[0]
ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
x = X.T.reshape(-1)[ii]
y = Y.T.reshape(-1)[ii]
xy = (np.column_stack([y, x])
if cbar.orientation == 'horizontal' else
np.column_stack([x, y]))
cbar.outline.set_xy(xy)
else:
cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))
outline = cbar.outline.get_xy()
outline[:2, 1] += cbar.norm(cbar_vmin)
outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax))
outline[6:, 1] += cbar.norm(cbar_vmin)
cbar.outline.set_xy(outline)
cbar.set_ticks(new_tick_locs, update_ticks=True)
###############################################################################
# Matplotlib
def _get_status(checks):
"""Deal with old MPL to get check box statuses."""
try:
return list(checks.get_status())
except AttributeError:
return [x[0].get_visible() for x in checks.lines]
###############################################################################
# Numba (optional requirement)
# Here we choose different defaults to speed things up by default
try:
import numba
if LooseVersion(numba.__version__) < LooseVersion('0.40'):
raise ImportError
prange = numba.prange
def jit(nopython=True, nogil=True, fastmath=True, cache=True,
**kwargs): # noqa
return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath,
cache=cache, **kwargs)
except ImportError:
has_numba = False
else:
has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
if not has_numba:
def jit(**kwargs): # noqa
def _jit(func):
return func
return _jit
prange = range
bincount = np.bincount
mean = np.mean
else:
@jit()
def bincount(x, weights, minlength): # noqa: D103
out = np.zeros(minlength)
for idx, w in zip(x, weights):
out[idx] += w
return out
# fix because Numba does not support axis kwarg for mean
@jit()
def _np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@jit()
def mean(array, axis):
return _np_apply_along_axis(np.mean, axis, array)
###############################################################################
# Added in Python 3.7 (remove when we drop support for 3.6)
try:
from contextlib import nullcontext
except ImportError:
from contextlib import contextmanager
@contextmanager
def nullcontext(enter_result=None):
yield enter_result
| bsd-3-clause |
Roibal/Geotechnical_Engineering_Python_Code | Example-Code/Tunneling_Stresses.py | 1 | 3861 | import math
import matplotlib.pyplot as plt
import numpy as np
#The purpose of this program is to calculate the Radial, Tangential and Shear Stress in a circular tunnel given input parameters.
def RadialStress(Pz, Diameter, k, Theta, r, Pi):
"""
RadialStress Function will Return the Radial Stress at a given point (r, Theta) and input parameters (Pz, Diameter, k, Pi)
"""
a = Diameter/2
Theta = math.radians(Theta)
return (1/2)*Pz*((1+k)*(1-(a**2)/(r**2))+(1-k)*(1-4*(a**2)/(r**2)+3*(a**4)/(r**4))*math.cos(2*Theta))+Pi*((a**2)/(r**2))
def TangentialStress(Pz, Diameter, k, Theta, r, Pi):
"""
    TangentialStress Function will Return the Tangential Stress at a given point (r, Theta) and input parameters (Pz, Diameter, k, Pi)
"""
a = Diameter/2
Theta = math.radians(Theta)
return (1/2)*Pz*((1+k)*(1+(a**2)/(r**2))-(1-k)*(1+3*(a**4)/(r**4))*math.cos(2*Theta))-Pi*((a**2)/(r**2))
def ShearStress(Pz, Diameter, k, Theta, r):
"""
    ShearStress Function will Return the Shear Stress at a given point (r, Theta) and input parameters (Pz, Diameter, k)
"""
a = Diameter/2
Theta = math.radians(Theta)
return (1/2)*Pz*(-(1-k)*(1+2*(a**2)/(r**2)-3*(a**4)/(r**4))*math.sin(2*Theta))
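# Minimal sanity-check sketch (illustrative only): at the excavation wall (r = a)
# with no internal pressure, these closed-form (Kirsch) expressions reduce to
# RadialStress = 0, ShearStress = 0 and
# TangentialStress = Pz*((1 + k) - 2*(1 - k)*cos(2*Theta)).
def CheckWallStresses(Pz=2750, Diameter=10, k=0.5, Theta=90):
    a = Diameter/2
    sigma_r = RadialStress(Pz, Diameter, k, Theta, a, Pi=0)
    sigma_t = TangentialStress(Pz, Diameter, k, Theta, a, Pi=0)
    tau = ShearStress(Pz, Diameter, k, Theta, a)
    closed_form = Pz*((1 + k) - 2*(1 - k)*math.cos(2*math.radians(Theta)))
    assert abs(sigma_r) < 1e-6 and abs(tau) < 1e-6
    assert abs(sigma_t - closed_form) < 1e-6
    return sigma_t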
def OverStressZone(RadStress, TanStress, ShrStress, c=800, phi=32):
    """
    OverStressZone Function will Return 1 if the major principal stress at a point exceeds the
    unconfined compressive strength implied by cohesion c (psi) and friction angle phi (degrees), else 0.
    """
    # Unconfined compressive strength from Mohr-Coulomb parameters: 2*c*cos(phi)/(1-sin(phi));
    # for the defaults c=800 psi and phi=32 degrees this is roughly 2886 psi.
    UCompStr = 2*c*math.cos(math.radians(phi))/(1-math.sin(math.radians(phi)))
    # Major principal stress from the radial, tangential and shear stress components
    Sigma1 = (1/2)*(RadStress+TanStress)+math.sqrt((1/4)*(RadStress-TanStress)**2+ShrStress**2)
if Sigma1>UCompStr:
return 1
else:
return 0
def main():
for k in [0.5, 1, 2.5]:
RadStress = []
TanStress = []
ShrStress = []
r_list = []
Pz = 2500 * 1.1
Pi = 0
Diameter = 10
Theta = 90
#Determine Stresses for Varying r and Plot Resulting Stresses
for r in range(5,60,1):
RadStress.append(RadialStress(Pz, Diameter, k, Theta, r, Pi))
TanStress.append(TangentialStress(Pz, Diameter, k, Theta, r, Pi))
ShrStress.append(ShearStress(Pz, Diameter, k, Theta, r))
r_list.append(r)
plt.figure(1)
plt.plot(r_list, RadStress, '|-', label='Radial Stress, K= {}'.format(k))
plt.plot(r_list, TanStress, label='Tangential Stress, K= {}'.format(k))
plt.xlabel('Feet')
plt.ylabel('Stress (PSI)')
plt.title('Tangential and Radial Stresses')
plt.legend()
#Graphing for Internal Pressure = 500 PSI
for k in [0.5, 1, 2.5]:
Pi = 500
RadStress = []
TanStress = []
ShrStress = []
r_list = []
#Determine Stresses for K = 0.5 and Plot Resulting Stresses
for r in range(5,60,1):
RadStress.append(RadialStress(Pz, Diameter, k, Theta, r, Pi))
TanStress.append(TangentialStress(Pz, Diameter, k, Theta, r, Pi))
ShrStress.append(ShearStress(Pz, Diameter, k, Theta, r))
r_list.append(r)
plt.figure(2)
#plt.subplot(311)
plt.plot(r_list, RadStress, '|-', label='Radial Stress, K= {}'.format(k))
plt.plot(r_list, TanStress, label='Tangential Stress, K= {}'.format(k))
plt.xlabel('Feet')
plt.ylabel('Stress (PSI)')
plt.title('Tangential and Radial Stresses, Internal Pressure of 500')
plt.legend()
plt.show()
#Problem 2
RadStress2 = []
TanStress2 = []
ShrStress2 = []
r_list2 = []
Pz = 2500 * 1.1
Pi = 0
Diameter = 10
k = 0.5
Theta = 0
#for r in range(5, 10, 1):
#RadStress2.append(RadialStress(Pz, Diameter, k, Theta, r, Pi))
#TanStress2.append(TangentialStress(Pz, Diameter, k, Theta, r, Pi))
#ShrStress2.append(ShearStress(Pz, Diameter, k, Theta, r))
#r_list2.append(r)
if __name__ == "__main__":
main()
| mit |
xzh86/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
adamsumm/MarioAI | Mario Levels/levelParse.py | 1 | 8093 | #! python
#/c/Anaconda/python
import cv2
import json
import numpy as np
import matplotlib.pyplot as plt
import json
def parseLevel(level,tiles):
level = cv2.imread(level)
solidTiles = ['00','10','33','66','99','269','270','271','434','435','436','437','438','439','450'];
powerupTiles = ['Mushroom','MushroomHidden'];
breakableTiles = ['01','03','67','68','69'];
goodTile = ['24','90'];
enemyTiles = ['turtle1','turtle2','fly1','fly2','fly3','fly4','pirannha','turtle1b','turtle2b','fly1b','fly2b','fly3b','fly4b','pirannha2','goomba','goomba2'];
bullets = ['09','75'];
warpPipe = ['pipe_ul']
coins = ['57','58','123'];
levelMap = np.zeros((level.shape[0]/16,level.shape[1]/16));
solidTiles = map(lambda str: 'Tiles/tileset_tile' + str + '.png',solidTiles)
solidTiles = map( cv2.imread,solidTiles)
solidLocations = findSubImageLocations(level,solidTiles,0.85)
powerupTiles = map(lambda str: 'Tiles/tileset_tile' + str + '.png',powerupTiles)
powerupTiles = map( cv2.imread,powerupTiles)
puLocations = findSubImageLocations(level,powerupTiles,0.85)
breakableTiles = map(lambda str: 'Tiles/tileset_tile' + str + '.png',breakableTiles)
breakableTiles = map( cv2.imread,breakableTiles)
breakLocations = findSubImageLocations(level,breakableTiles,0.85)
goodTile = map(lambda str: 'Tiles/tileset_tile' + str + '.png',goodTile)
goodTile = map( cv2.imread,goodTile)
goodLocations = findSubImageLocations(level,goodTile,0.85)
enemyTiles = map(lambda str: 'Tiles/'+ str + '.png',enemyTiles)
enemyTiles = map( cv2.imread,enemyTiles)
enemyLocations = findSubImageLocations(level,enemyTiles,0.4)
warpPipe = map(lambda str: 'Tiles/'+ str + '.png',warpPipe)
warpPipe = map( cv2.imread,warpPipe)
pipeLocations = findSubImageLocations(level,warpPipe,0.85)
coins = map(lambda str: 'Tiles/tileset_tile'+ str + '.png',coins)
coins = map( cv2.imread,coins)
coinLocations = findSubImageLocations(level,coins,0.8)
bullets = map(lambda str: 'Tiles/tileset_tile'+ str + '.png',bullets)
bullets = map( cv2.imread,bullets)
bulletLocations = findSubImageLocations(level,bullets,0.8)
plt.imshow(level);
for ii in range(0,solidLocations[0].size):
levelMap[clamp(round(solidLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(solidLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 1
for ii in range(0,breakLocations[0].size):
levelMap[clamp(round(breakLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(breakLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 2
for ii in range(0,goodLocations[0].size):
levelMap[clamp(round(goodLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(goodLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 4
for ii in range(0,enemyLocations[0].size):
levelMap[clamp(round(enemyLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(enemyLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 5
for ii in range(0,coinLocations[0].size):
levelMap[clamp(round(coinLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(coinLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 7
for ii in range(0,bulletLocations[0].size):
levelMap[clamp(round(bulletLocations[0][ii]/16),0,levelMap.shape[0]-1),clamp(round(bulletLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 8
for ii in range(0,pipeLocations[0].size):
for jj in range(0,int(levelMap.shape[0]-1-clamp(round(pipeLocations[0][ii]/16),0,levelMap.shape[0]-1)-1)):
if (levelMap[clamp(round(pipeLocations[0][ii]/16),0,levelMap.shape[0]-1)+jj,clamp(round(pipeLocations[1][ii]/16),0,levelMap.shape[1]-1)] == 0):
levelMap[clamp(round(pipeLocations[0][ii]/16),0,levelMap.shape[0]-1)+jj,clamp(round(pipeLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 6
levelMap[clamp(round(pipeLocations[0][ii]/16),0,levelMap.shape[0]-1)+jj,clamp(round(pipeLocations[1][ii]/16),0,levelMap.shape[1]-1)+1] = 6
for ii in range(0,puLocations[0].size):
levelMap[clamp(round(puLocations[0][ii]/16),0,levelMap.shape[0]-1)+1,clamp(round(puLocations[1][ii]/16),0,levelMap.shape[1]-1)] = 3
minTileSize = 2;
maxTileSize = 8;
#plt.imshow(levelMap)
#plt.show()
for sourceSize in range(2,10,2):
for tileSize in range(2,10,2):
for ii in range(0,levelMap.shape[1],1):
#for jj in range(0,levelMap.shape[0],2):
getTileAndTransitions(tiles,levelMap,ii,0,sourceSize,tileSize,14)
toRemove = []
for tile in tiles:
if len(tiles[tile]["right"]) == 0:
toRemove.append(tile)
while len(toRemove) > 0:
for tile in toRemove:
del tiles[tile]
for tile in tiles:
for rem in toRemove:
if rem in tiles[tile]["right"]:
del tiles[tile]["right"][rem]
toRemove = []
for tile in tiles:
if len(tiles[tile]["right"]) == 0:
toRemove.append(tile)
#print tiles
# plt.plot(puLocations[1],puLocations[0],'gx')
# plt.plot(breakLocations[1],breakLocations[0],'bx')
# plt.plot(goodLocations[1],goodLocations[0],'wx')
# plt.plot(coinLocations[1],coinLocations[0],'yo');
# plt.plot(enemyLocations[1],enemyLocations[0],'ro');
# plt.plot(pipeLocations[1],pipeLocations[0],'go');
# plt.show()
def clamp(val,minimum,maximum):
return max(min(val, maximum), minimum)
def findSubImageLocations(image,subImages,confidence):
allLocations = [ np.array([]) , np.array([])];
for subImage in subImages:
result = cv2.matchTemplate(image,subImage,cv2.TM_CCOEFF_NORMED)
match_indices = np.arange(result.size)[(result>confidence).flatten()]
locations = np.unravel_index(match_indices,result.shape)
allLocations[0] = np.concatenate((allLocations[0],locations[0]))
allLocations[1] = np.concatenate((allLocations[1],locations[1]))
return allLocations
def prefixPostfix(prefix,str,postfix):
return prefix + str + postfix
def getTileAndTransitions(tiles,map,xx,yy,tileWidth,width,height):
tile = getTile(map,xx,yy,tileWidth,height)
if (tile != ""):
left = ""
right = ""
#up = ""
#down = ""
left = getTile(map,xx-width,yy,width,height)
right = getTile(map,xx+tileWidth,yy,width,height)
# if (False):#width == 2):
# left = getTile(map,xx-width*2,yy,width,height)
# right = getTile(map,xx+width,yy,width,height)
# up = getTile(map,xx,yy-height*2,width,height)
# down = getTile(map,xx,yy+height,width,height)
# else :
# left = getTile(map,xx+tileWidth-width,yy,width,height)
# right = getTile(map,xx,yy,width,height)
# up = getTile(map,xx+tileWidth-width,yy-height+2,width,height)
# right = getTile(map,xx,yy-height+2,width,height)
if (tile not in tiles):
#tiles[tile] = {"left":{},"right":{}, "up" : {}, "down" : {}}
tiles[tile] = {"left":{}, "right":{}}
if (left != ""):
if (left in tiles[tile]["left"]):
tiles[tile]["left"][left] += width
else :
tiles[tile]["left"][left] = width
if (right != ""):
if (right in tiles[tile]["right"]):
tiles[tile]["right"][right] += width
else :
tiles[tile]["right"][right] = width
# if (down != ""):
# if (down in tiles[tile]["down"]):
# tiles[tile]["down"][down] += width
# else :
# tiles[tile]["down"][down] = width
# if (up != ""):
# if (up in tiles[tile]["up"]):
# tiles[tile]["up"][up] += width
# else :
# tiles[tile]["up"][up] = width
def getTile(map,xx,yy,width,height):
if (xx + width <= map.shape[1] and yy + height <= map.shape[0] and xx >= 0 and yy >= 0):
outStr = ""
for jj in range(yy,yy+height):
for ii in range(xx,xx+width):
outStr += str(map[jj,ii]) + ","
outStr += ";"
return outStr
else :
return "";
levels = [ 'Mario1/mario-1-1.png','Mario1/mario-1-2.png','Mario1/mario-1-3.png','Mario1/mario-2-1.png',
'Mario1/mario-3-1.png','Mario1/mario-3-2.png','Mario1/mario-3-3.png','Mario1/mario-4-1.png',
'Mario1/mario-4-2.png','Mario1/mario-4-3.png','Mario1/mario-5-1.png','Mario1/mario-5-2.png',
'Mario1/mario-5-3.png','Mario1/mario-6-1.png','Mario1/mario-6-2.png','Mario1/mario-6-3.png',
'Mario1/mario-7-1.png','Mario1/mario-8-1.png','Mario1/mario-8-2.png','Mario1/mario-8-3.png']
tiles = {}
for levelFile in levels:
parseLevel(levelFile,tiles)
json.dump(tiles,open("tiles.json",'w'),sort_keys=True,
indent=4, separators=(',', ': '))
| cc0-1.0 |
maartenbreddels/vaex | tests/groupby_test.py | 1 | 9785 | from common import *
import numpy as np
import vaex
def test_groupby_options():
t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64)
y = np.arange(len(t))
sum_answer = [y[k*7:(k+1)*7].sum() for k in range(5)]
mean_answer = [y[k*7:(k+1)*7].mean() for k in range(5)]
df = vaex.from_arrays(t=t, y=y)
by = vaex.BinnerTime.per_week(df.t)
dfg = df.groupby(by, agg={'y': 'sum'})
assert dfg.y.tolist() == sum_answer
dfg = df.groupby(by, agg={'y': vaex.agg.sum})
assert dfg.y.tolist() == sum_answer
dfg = df.groupby(by, agg={'z': vaex.agg.sum('y')})
assert dfg.z.tolist() == sum_answer
dfg = df.groupby(by, agg={'z': vaex.agg.sum(df.y)})
assert dfg.z.tolist() == sum_answer
dfg = df.groupby(by, agg=[vaex.agg.sum('y')])
assert dfg.y_sum.tolist() == sum_answer
dfg = df.groupby(by, agg=[vaex.agg.sum('y'), vaex.agg.mean('y')])
assert dfg.y_sum.tolist() == sum_answer
assert dfg.y_mean.tolist() == mean_answer
dfg = df.groupby(by, agg={'z': [vaex.agg.sum('y'), vaex.agg.mean('y')]})
assert dfg.z_sum.tolist() == sum_answer
assert dfg.z_mean.tolist() == mean_answer
# default is to do all columns
dfg = df.groupby(by, agg=[vaex.agg.sum, vaex.agg.mean])
assert dfg.y_sum.tolist() == sum_answer
assert dfg.y_mean.tolist() == mean_answer
dfg = df.groupby(by, agg=vaex.agg.sum)
assert dfg.y_sum.tolist() == sum_answer
assert "t_sum" not in dfg.get_column_names()
dfg = df.groupby(by, agg=vaex.agg.sum('y'))
assert dfg.y_sum.tolist() == sum_answer
assert "t_sum" not in dfg.get_column_names()
dfg = df.groupby(by, agg=vaex.agg.sum(df.y))
assert dfg.y_sum.tolist() == sum_answer
assert "t_sum" not in dfg.get_column_names()
# coverage only
dfg = df.groupby(df.y, agg=vaex.agg.mean(df.y))
dfg = df.groupby(by).agg({'y': 'sum'})
assert dfg.y.tolist() == [y[k*7:(k+1)*7].sum() for k in range(5)]
dfg = df.groupby(by).agg({'y': 'sum'})
assert dfg.y.tolist() == [y[k*7:(k+1)*7].sum() for k in range(5)]
dfg = df.groupby(by, 'sum')
assert dfg.y_sum.tolist() == sum_answer
def test_groupby_long_name(df_local):
df = df_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
df.add_column('g', g)
df['long_name'] = df.x
dfg = df.groupby(by=df.g, agg=[vaex.agg.mean(df.long_name)]).sort('g')
# bugfix check for mixing up the name
assert 'long_name_mean' in dfg
def test_groupby_1d(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
ds.add_column('g', g)
dfg = ds.groupby(by=ds.g, agg={'count': vaex.agg.count()}).sort('g')
assert dfg.g.tolist() == [0, 1, 2]
assert dfg['count'].tolist() == [4, 4, 2]
def test_groupby_1d_cat(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
ds.add_column('g', g)
ds.categorize('g', labels=['cat', 'dog', 'snake'], inplace=True)
dfg = ds.groupby(by=ds.g, agg='count')
assert dfg.g.tolist() == ['cat', 'dog', 'snake']
assert dfg['count'].tolist() == [4, 4, 2]
def test_groupby_1d_nan(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, np.nan, 2, 2])
ds.add_column('g', g)
dfg = ds.groupby(by=ds.g, agg={'count': vaex.agg.count()}).sort('g')
assert dfg.g.tolist()[:-1] == [0, 1, 2] # last item is nan
assert dfg['count'].tolist() == [4, 3, 2, 1]
def test_binby_1d(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
ds.add_column('g', g)
ar = ds.binby(by=ds.g, agg={'count': vaex.agg.count()})
assert ar.coords['g'].values.tolist() == [0, 1, 2]
assert ar.coords['statistic'].values.tolist() == ["count"]
assert ar.dims == ('statistic', 'g')
assert ar.data.tolist() == [[4, 4, 2]]
def test_binby_1d_cat(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
ds.add_column('g', g)
ds.categorize('g', labels=['cat', 'dog', 'snake'], inplace=True)
ar = ds.binby(by=ds.g, agg=vaex.agg.count())
assert ar.coords['g'].values.tolist() == ['cat', 'dog', 'snake']
assert ar.data.tolist() == [4, 4, 2]
def test_binby_2d(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
h = np.array([5, 5, 5, 6, 5, 5, 5, 5, 6, 6])
ds['g'] = g
ds['h'] = h
ar = ds.binby(by=[ds.g, ds.h], agg={'count': vaex.agg.count()})
assert ar.coords['g'].values.tolist() == [0, 1, 2]
assert ar.coords['h'].values.tolist() == [5, 6]
assert ar.coords['statistic'].values.tolist() == ["count"]
assert ar.dims == ('statistic', 'g', 'h')
assert ar.data.tolist() == [[[3, 1], [4, 0], [0, 2]]]
ar = ds.binby(by=[ds.g, ds.h], agg=vaex.agg.count())
assert ar.dims == ('g', 'h')
assert ar.data.tolist() == [[3, 1], [4, 0], [0, 2]]
def test_groupby_2d(ds_local):
ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
h = np.array([5, 5, 5, 6, 5, 5, 5, 5, 6, 6])
ds['g'] = g
ds['h'] = h
dfg = ds.groupby(by=[ds.g, ds.h], agg={'count': vaex.agg.count()}).sort('g')
assert dfg.g.tolist() == [0, 0, 1, 2]
assert dfg['count'].tolist() == [3, 1, 4, 2]
def test_groupby_datetime():
t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64)
y = np.arange(len(t))
df = vaex.from_arrays(t=t, y=y)
dfg = df.groupby(vaex.BinnerTime.per_week(df.t), agg={'y': 'sum'})
assert dfg.y.tolist() == [y[k*7:(k+1)*7].sum() for k in range(5)]
# other syntax
dfg = df.groupby(vaex.BinnerTime.per_week(df.t)).agg({'y': 'sum'})
assert dfg.y.tolist() == [y[k*7:(k+1)*7].sum() for k in range(5)]
def test_groupby_datetime_quarter():
t = np.arange('2015-01-01', '2016-01-02', dtype=np.datetime64)
y = np.arange(len(t))
df = vaex.from_arrays(t=t, y=y)
dfg = df.groupby(vaex.BinnerTime.per_quarter(df.t)).agg({'y': 'sum'})
values = dfg.y.tolist()
assert len(values) == 5
assert sum(values) == sum(y)
def test_groupby_count():
# ds = ds_local.extract()
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 1], dtype='int32')
s = np.array(list(map(str, [0, 0, 0, 0, 1, 1, 1, 1, 2, 2])))
df = vaex.from_arrays(g=g, s=s)
groupby = df.groupby('s')
dfg = groupby.agg({'g': 'mean'}).sort('s')
assert dfg.s.tolist() == ['0', '1', '2']
assert dfg.g.tolist() == [0, 1, 0.5]
dfg2 = df.groupby('s', {'g': 'mean'}).sort('s')
assert dfg._equals(dfg2)
def test_groupby_std():
g = np.array([9, 2, 3, 4, 0, 1, 2, 3, 2, 5], dtype='int32')
s = np.array(list(map(str, [0, 0, 0, 0, 1, 1, 1, 1, 2, 2])))
df = vaex.from_arrays(g=g, s=s)
groupby = df.groupby('s')
dfg = groupby.agg({'g': 'std'})
assert dfg.s.tolist() == ['0', '1', '2']
pandas_g = df.to_pandas_df().groupby('s').std(ddof=0).g.tolist()
np.testing.assert_array_almost_equal(dfg.g.tolist(), pandas_g)
def test_groupby_count_string():
g = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
s = np.array(list(map(str, [0, 0, 0, 0, 1, 1, 1, 1, 2, 2])))
df = vaex.from_arrays(g=g, s=s)
groupby = df.groupby('s')
dfg = groupby.agg({'m': vaex.agg.count('s')})
assert dfg.s.tolist() == ['0', '1', '2']
assert dfg.m.tolist() == [4, 4, 2]
@pytest.mark.skip(reason='not yet supported')
def test_groupby_mode():
animals = ['dog', 'dog', 'cat', 'cat', 'dog', 'mouse', 'mouse', 'cat', 'cat', 'dog']
nums = [1, 2, 2, 1, 2, 2, 3, 3, 3, 1]
vehicles = ['car', 'bus', 'car', 'bus', 'car', 'bus', 'plane', 'bus', 'plane', 'car']
df = vaex.from_arrays(animals=animals, nums=nums, vehicles=vehicles)
groupby = df.groupby('nums')
dfg = groupby.agg({'animals': 'mode',
'vehicles': 'mode'})
# Simple case
assert dfg.animals.tolist() == ['dog', 'dog', 'cat']
# Case when there is no clear mode in one sample
grouped_vehicles = dfg.vehicles.tolist()
assert grouped_vehicles[0] == 'car'
assert set(grouped_vehicles[1]) == set({'bus', 'car'})
assert grouped_vehicles[2] == 'plane'
@pytest.mark.skip(reason='not yet supported')
def test_grouby_mode_string():
animals = ['dog', 'dog', 'cat', 'cat', 'dog', 'mouse', 'mouse', 'cat', 'cat', 'dog']
nums = [1, 2, 2, 1, 2, 2, 3, 3, 3, 1]
vehicles = ['car', 'bus', 'car', 'bus', 'car', 'bus', 'plane', 'bus', 'plane', 'car']
df = vaex.from_arrays(animals=animals, nums=nums, vehicles=vehicles)
groupby = df.groupby('vehicles')
dfg = groupby.agg({'animals': 'mode',
'nums': 'mode'})
grouped_animals = dfg.animals.tolist()
assert grouped_animals[0] == 'cat'
assert grouped_animals[1] == 'dog'
assert set(grouped_animals[2]) == set({'cat', 'mouse'}) # Special case when no mode is found
grouped_nums = dfg.nums.tolist()
assert grouped_nums[0] == 2
assert set(grouped_nums[1]) == set({1, 2})
assert grouped_nums[2] == 3 # Special case when no mode is found
def test_groupby_same_result():
h = np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4], dtype=int)
df = vaex.from_arrays(h=h)
# Compare value_counts with the groupby counts for the hour column
vc = df.h.value_counts()
with small_buffer(df):
group = df.groupby(by=df.h).agg({'h': 'count'})
# second time it uses a new set, this caused a bug
# see https://github.com/vaexio/vaex/pull/233
group = df.groupby(by=df.h).agg({'h': 'count'})
group_sort = group.sort(by='count', ascending=False)
assert vc.values.tolist() == group_sort['count'].values.tolist(), 'counts are not correct.'
assert vc.index.tolist() == group_sort['h'].values.tolist(), 'the indices of the counts are not correct.'
| mit |
exa-analytics/exa | exa/core/tests/test_container.py | 1 | 3829 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exa.core.container`
#######################################
"""
import sys
from os import remove
from unittest import TestCase
from tempfile import mkdtemp
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from exa import Container, TypedMeta, DataFrame, Series, Field
class DummyDataFrame(DataFrame):
_index = 'index'
_categories = {'cat': str}
_columns = ['x', 'y', 'z', 'cat']
class DummySeries(Series):
_precision = 3
_sname = 'field'
_iname = 'value'
class DummyMeta(TypedMeta):
s0 = DummySeries
s1 = DummySeries
df = DummyDataFrame
class DummyContainer(Container, metaclass=DummyMeta):
pass
class TestContainer(TestCase):
@classmethod
def setUpClass(cls):
x = [0, 0, 0, 0, 0]
y = [1.1, 2.2, 3.3, 4.4, 5.5]
z = [0.5, 1.5, 2.5, 3.5, 4.5]
cat = ['cube', 'sphere', 'cube', 'sphere', 'cube']
group = [0, 0, 1, 1, 1]
cls.container = DummyContainer()
cls.container._test = False
cls.container.s0 = DummySeries(y)
cls.container.s1 = DummySeries(cat, dtype='category')
cls.container.df = DataFrame.from_dict({'x': x, 'y': y, 'z': z, 'cat': cat, 'group': group})
cls.container._cardinal = "df"
def test_attributes(self):
self.assertIsInstance(self.container.s0, DummySeries)
self.assertIsInstance(self.container.s1.dtype, CategoricalDtype)
self.assertIsInstance(self.container.df, DummyDataFrame)
def test_copy(self):
cp = self.container.copy()
self.assertIsNot(self.container, cp)
cp = self.container.copy(name="name", description="descr", meta={'key': "value"})
self.assertEqual(cp.name, "name")
self.assertEqual(cp.description, "descr")
self.assertDictEqual(cp.meta, {'key': "value"})
def test_concat(self):
with self.assertRaises(NotImplementedError):
self.container.concat()
def test_slice_naive(self):
c = self.container[[0]].copy()
self.assertEqual(c.df.shape, (1, 5))
c = self.container[1:]
self.assertEqual(c.df.shape, (4, 5))
c = self.container.slice_naive([0])
self.assertEqual(c.df.shape, (1, 5))
c = self.container.slice_naive(0)
self.assertEqual(c.df.shape, (1, 5))
c = self.container.slice_naive(slice(0, 1))
self.assertEqual(c.df.shape, (1, 5))
def test_getsizeof(self):
size_bytes = sys.getsizeof(self.container)
self.assertIsInstance(size_bytes, int)
self.assertTrue(size_bytes > 100)
def test_memory_usage(self):
mem = self.container.memory_usage()
self.assertEqual(mem.shape, (5, ))
mem = self.container.memory_usage(True)
self.assertIsInstance(mem, str)
def test_save_load_to_hdf(self):
tmpdir = mkdtemp()
path = self.container.save()
self.assertTrue(path.endswith(".hdf5"))
remove(path)
path = self.container.save(tmpdir)
self.assertTrue(path.endswith(".hdf5"))
remove(path)
with self.assertRaises(ValueError):
self.container.save(tmpdir + "/stuff.things")
self.container.to_hdf(path)
c = Container.load(path)
self.assertEqual(c.df.shape, self.container.df.shape)
c = Container.from_hdf(path)
self.assertEqual(c.df.shape, self.container.df.shape)
remove(path)
def test_dunder(self):
c = Container(x=DataFrame())
self.assertTrue(hasattr(c, "x"))
del c["x"]
self.assertFalse(hasattr(c, "x"))
with self.assertRaises(AttributeError):
c["x"]
| apache-2.0 |
cesc-park/attend2u | scripts/generate_yfcc_hashtag_dataset.py | 1 | 11404 | import operator
import re
import os
import json
import logging
from collections import Counter
import urllib
from tqdm import tqdm
import colorlog
from sklearn.feature_extraction.text import TfidfTransformer
import numpy as np
from unidecode import unidecode
#####################
# Hyperparameters
#####################
CONTEXT_LENGTH = 100
HASHTAG_VOCAB_SIZE = 60000
DATA_ROOT_PATH = '../data_yfcc'
# For dataset
HASHTAG_TRAIN_JSON_FNAME = os.path.join(
DATA_ROOT_PATH, 'json', 'yfcc-hashtag-train.json'
)
HASHTAG_TEST1_JSON_FNAME = os.path.join(
DATA_ROOT_PATH, 'json', 'yfcc-hashtag-test1.json'
)
HASHTAG_TEST2_JSON_FNAME = os.path.join(
DATA_ROOT_PATH, 'json', 'yfcc-hashtag-test2.json'
)
HASHTAG_OUTPUT_PATH = os.path.join(DATA_ROOT_PATH, 'hashtag_dataset')
HASHTAG_VOCAB_FNAME = os.path.join(
HASHTAG_OUTPUT_PATH, '%d.vocab' % (HASHTAG_VOCAB_SIZE)
)
# For vocaulary
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# For tokenization
try:
# UCS-4
EMOTICON = re.compile(u'(([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF]))')
except Exception, e:
# UCS-2
EMOTICON = re.compile(u'(([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF]))')
NOT_EMOTICON = re.compile(r'(\\U([0-9A-Fa-f]){8})|(\\u([0-9A-Fa-f]){4})')
# Regular expressions used to tokenize
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d+")
_URL_RE = re.compile(r'(?i)\b((?:http[s]?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>"\']+))')
_HREF_RE = re.compile('<a href="(.*?)".*>(.*)</a>')
def sort_dict(dic):
# Sort by alphabet
sorted_pair_list = sorted(dic.items(), key=operator.itemgetter(0))
# Sort by count
sorted_pair_list = sorted(sorted_pair_list, key=operator.itemgetter(1), reverse=True)
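# Python's sort is stable, so tags with equal counts keep their alphabetical order.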
return sorted_pair_list
def load_json(json_fname):
colorlog.info("Load %s" % (json_fname))
with open(json_fname, 'r') as f:
json_object = json.load(f)
return json_object
def tokenize(sentences):
"""Tokenize a sentence"""
if isinstance(sentences, list):
sentences = ' '.join(sentences)
# Change separator to space
sentences = sentences.split(',')
output = []
for sentence in sentences:
sentence = urllib.unquote_plus(sentence)
sentence = sentence.strip()
# Remove https
sentence = _HREF_RE.sub("", sentence)
sentence = _URL_RE.sub("", sentence)
# Remove <b></b>
sentence = re.sub(r"<b>", " ", sentence)
sentence = re.sub(r"</b>", " ", sentence)
# Delete square brackets, parentheses, and the +, ~, = characters
sentence = re.sub('\[', " ", sentence)
sentence = re.sub('\]', " ", sentence)
sentence = re.sub('\+', " ", sentence)
sentence = re.sub('\)', " ", sentence)
sentence = re.sub('\(', " ", sentence)
sentence = re.sub('~', " ", sentence)
sentence = re.sub('=', " ", sentence)
# Lowercase
sentence = sentence.lower()
# Delete punctuations
sentence = re.sub(r"([.\*,!?\"@#'|:;)(])", "", sentence)
# Delete emoji
sentence = EMOTICON.sub(r' ', sentence)
# Run unidecode
sentence = unidecode(sentence)
sentence = NOT_EMOTICON.sub(r' ', sentence)
sentence = re.sub(r'\\\\', '', sentence)
sentence = re.sub('\/', '', sentence)
sentence = re.sub(r'\\', '', sentence)
# Normalize digit
sentence = _DIGIT_RE.sub(b"0", sentence)
sentence = re.sub(r"(?<![a-zA-Z])0(?![a-zA-Z])", r"", sentence) # remove standalone "0" if there is no preceding or following letter
# Normalize "-" and "_" to "-"
sentence = re.sub(r"[\-_]", r"-", sentence)
sentence = re.sub(r"(?<![a-zA-Z0-9])\-(?![a-zA-Z0-9])", r"", sentence) # remove "-" if there is no preceding or following alphanumeric
# Escape unicode
sentence = sentence.encode('unicode-escape').decode('unicode-escape').encode('ascii', 'ignore').decode('ascii')
splitted_sentence = sentence.split()
if len(splitted_sentence) == 2:
output.append('_'.join(splitted_sentence))
elif len(splitted_sentence) == 1:
output.append(splitted_sentence[0])
return output
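# Example (illustrative): tokenize('Golden+Gate,San%20Francisco') returns ['golden_gate', 'san_francisco'];
# tags longer than two words are dropped.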
def tokenize_all(train_json, test1_json, test2_json):
"""
Tokenize sentences in raw dataset
Args:
train_json, test1_json, test2_json: raw json object
key: 'caption' or 'tags'
"""
token_counter = Counter()
train_tokens = {}
test1_tokens = {}
test2_tokens = {}
# Train data
for user_id, posts in tqdm(train_json.items(), ncols=70, desc="train data"):
train_tokens[user_id] = {}
for post in posts:
tags = tokenize(post['user tags'])
post_id = post['page url'].split('/')[-2]
post_tokens = tags
train_tokens[user_id][post_id] = post_tokens
for post_token in post_tokens:
token_counter[post_token] += 1
# Test1 data
for user_id, posts in tqdm(test1_json.items(), ncols=70, desc="test1 data"):
test1_tokens[user_id] = {}
for post in posts:
tags = tokenize(post['user tags'])
post_id = post['page url'].split('/')[-2]
post_tokens = tags
test1_tokens[user_id][post_id] = post_tokens
# Test2 data
for user_id, posts in tqdm(test2_json.items(), ncols=70, desc="test2 data"):
test2_tokens[user_id] = {}
for post in posts:
tags = tokenize(post['user tags'])
post_id = post['page url'].split('/')[-2]
post_tokens = tags
test2_tokens[user_id][post_id] = post_tokens
return token_counter, train_tokens, test1_tokens, test2_tokens
def get_tfidf_words(train_tokens, test1_tokens, test2_tokens, vocab, rev_vocab):
colorlog.info("Get tfidf words")
def _preprocess(all_tokens, rev_vocab):
counter = np.zeros([len(all_tokens), len(rev_vocab)])
user_ids = []
for i, (user_id, posts) in enumerate(
tqdm(all_tokens.items(), ncols=70, desc="preprocess")
):
user_ids.append(user_id)
for post_id, tokens in posts.items():
token_ids = [rev_vocab.get(token, UNK_ID) for token in tokens]
for token_id in token_ids:
counter[i, token_id] += 1
return counter, user_ids
train_counter, train_user_ids = _preprocess(train_tokens, rev_vocab)
test1_counter, test1_user_ids = _preprocess(test1_tokens, rev_vocab)
test2_counter, test2_user_ids = _preprocess(test2_tokens, rev_vocab)
colorlog.info("Fit and transform train tfidf")
vectorizer = TfidfTransformer()
train_tfidf = vectorizer.fit_transform(train_counter).toarray()
test1_tfidf = vectorizer.transform(test1_counter).toarray()
test2_tfidf = vectorizer.transform(test2_counter).toarray()
def _extract_tokens(tfidfs, user_ids, vocab):
user_tokens = {}
for i, user_id in enumerate(user_ids):
tfidf = np.argsort(-tfidfs[i])[:CONTEXT_LENGTH]
weight = np.sort(-tfidfs[i])[:CONTEXT_LENGTH]
tokens = []
for j, (index, token_weight) in enumerate(zip(tfidf, weight)):
token = vocab[index]
if token_weight < 0.0:
if index != UNK_ID:
tokens.append(token)
else:
break
user_tokens[user_id] = tokens
return user_tokens
colorlog.info("Extract tokens from tfidf matrix")
train_user_tokens = _extract_tokens(train_tfidf, train_user_ids, vocab)
test1_user_tokens = _extract_tokens(test1_tfidf, test1_user_ids, vocab)
test2_user_tokens = _extract_tokens(test2_tfidf, test2_user_ids, vocab)
return train_user_tokens, test1_user_tokens, test2_user_tokens
def create_vocabulary(counter, fname, vocab_size):
colorlog.info("Create vocabulary %s" % (fname))
sorted_tokens = sort_dict(counter)
vocab = _START_VOCAB + [x[0] for x in sorted_tokens]
if len(vocab) > vocab_size:
vocab = vocab[:vocab_size]
with open(fname, 'w') as f:
for w in vocab:
f.write(w + "\n")
rev_vocab = {}
for i, token in enumerate(vocab):
rev_vocab[token] = i
return vocab, rev_vocab
def save_data(train_data, test1_data, test2_data, output_path, rev_vocab, remove_unk=False):
"""
Data format:
numpyfname,contextlength,captionlength,contexttoken1_contexttoken2,wordtoken1_wordtoken2
e.g. 12345.npy,4,3,445_24_445_232,134_466_234
"""
def _save_data(all_tokens, all_tfidf, fname, remove_unk=True):
all_strings = []
for user_id, posts in all_tokens.items():
context_tokenids = map(
str, [rev_vocab.get(token, UNK_ID) for token in all_tfidf[user_id]]
)
context_length = str(len(context_tokenids))
context_string = '_'.join(context_tokenids)
for post_id, tokens in posts.items():
caption_tokenids = map(
str, [rev_vocab.get(token, UNK_ID) for token in tokens]
)
if remove_unk:
filtered_tokenids = []
for tokenid in caption_tokenids:
if tokenid != str(UNK_ID):
filtered_tokenids.append(tokenid)
caption_tokenids = filtered_tokenids
caption_length = str(len(caption_tokenids))
caption_string = '_'.join(caption_tokenids)
numpy_string = '%s_%s.npy' % (user_id, post_id)
all_string = ','.join([
numpy_string, context_length, caption_length,
context_string, caption_string
])
all_strings.append((all_string + '\n', len(caption_tokenids)))
# sort by caption length
all_strings = sorted(all_strings, key=lambda x: x[1])
with open(fname, 'w') as f:
for all_string in all_strings:
f.write(all_string[0])
_save_data(
train_data[0], train_data[1], os.path.join(output_path, "train.txt")
)
_save_data(
test1_data[0], test1_data[1], os.path.join(output_path, "test1.txt"),
False
)
_save_data(
test2_data[0], test2_data[1], os.path.join(output_path, "test2.txt"),
False
)
def main():
colorlog.basicConfig(
filename=None,
level=logging.INFO,
format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
if not os.path.exists(HASHTAG_OUTPUT_PATH):
colorlog.info("Create directory %s" % (HASHTAG_OUTPUT_PATH))
os.makedirs(HASHTAG_OUTPUT_PATH)
# Load raw data
hashtag_train_json = load_json(HASHTAG_TRAIN_JSON_FNAME)
hashtag_test1_json = load_json(HASHTAG_TEST1_JSON_FNAME)
hashtag_test2_json = load_json(HASHTAG_TEST2_JSON_FNAME)
# Tokenize all
hashtag_counter, hashtag_train_tokens, hashtag_test1_tokens, \
hashtag_test2_tokens = tokenize_all(
hashtag_train_json,
hashtag_test1_json,
hashtag_test2_json
)
with open('hashcounter.txt', 'w') as f:
for key, value in hashtag_counter.most_common():
f.write("%s : %d\n" % (key, value))
# Create vocabulary
hashtag_vocab, hashtag_rev_vocab = create_vocabulary(
hashtag_counter, HASHTAG_VOCAB_FNAME, HASHTAG_VOCAB_SIZE
)
# Get tfidf weighted tokens
hashtag_train_tfidf_tokens, hashtag_test1_tfidf_tokens, \
hashtag_test2_tfidf_tokens = get_tfidf_words(
hashtag_train_tokens,
hashtag_test1_tokens,
hashtag_test2_tokens,
hashtag_vocab,
hashtag_rev_vocab
)
# Save data
save_data(
(hashtag_train_tokens, hashtag_train_tfidf_tokens),
(hashtag_test1_tokens, hashtag_test1_tfidf_tokens),
(hashtag_test2_tokens, hashtag_test2_tfidf_tokens),
HASHTAG_OUTPUT_PATH,
hashtag_rev_vocab,
True
)
if __name__ == '__main__':
main()
| mit |
ky822/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
cdeil/gammalib | inst/cta/test/test_radial_acceptance.py | 1 | 2481 | #! /usr/bin/env python
# ===========================================================================================#
# This script displays the radial acceptance model that may be used to model the
# CTA radial acceptance.
#
# Required 3rd party modules:
# - matplotlib
#
# Copyright (C) 2011 Jurgen Knodlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================================#
import matplotlib.pyplot as plt
from gammalib import *
from math import *
# ========== #
# Show model #
# ========== #
def show_model(xmlfile):
"""
Show radial acceptance model from the XML file using matplotlib.
"""
# Load the model
models = GModels(xmlfile)
# Extract radial acceptance model
radial = cast_GCTAModelRadialAcceptance(models["Background"]).radial()
# Create angular axis (from 0 to 4 deg)
thetas = [i * 0.05 for i in range(80)]
# Get model values
values = [radial.eval(theta) for theta in thetas]
values_grad = [radial.eval_gradients(theta) for theta in thetas]
# Create figure
plt.figure(1)
plt.title("Radial acceptance model (" + radial.type() + ")")
# Plot data
plt.plot(thetas, values, 'r-')
plt.plot(thetas, values_grad, 'ro')
# Set axes
plt.xlabel("Offset angle (deg)")
plt.ylabel("Function value")
# Show plot
plt.show()
# Return
return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
"""
Show radial acceptance models.
"""
# Dump header
print
print "*********************************"
print "* Show radial acceptance models *"
print "*********************************"
# Display various models
show_model("data/crab.xml")
show_model("data/crab_poly.xml")
show_model("data/crab_profile.xml")
| gpl-3.0 |
tomlof/scikit-learn | sklearn/ensemble/tests/test_forest.py | 19 | 41737 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parrallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parrallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
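    # (Sketch of the counting argument, assuming each split threshold falls uniformly in one of
    # the gaps between observed values: the root cut lands in one of the 3 gaps of {0, 1, 2, 3}
    # with probability 1/3 each. A middle cut leaves 2 values on each side and yields the single
    # compact tree, hence probability 1/3; an outer cut leaves 3 values on one side, which can be
    # split in 2 further ways, giving 2 * 2 = 4 trees with probability 1/3 * 1/2 = 1/6 each.)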
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
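    # (For these data every class-1 sample has sample_weight 100 and class_weight[1] is also 100,
    # so, assuming the effective weight is the product sample_weight * class_weight[class],
    # fitting with sample_weight ** 2 should match fitting with class_weight plus sample_weight.)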
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/linear_model/plot_ridge_path.py | 27 | 2129 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit large oscillations. In practice it is necessary to tune alpha
so that a balance is maintained between the two.
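For reference, a minimal sketch of the objective involved: :class:`Ridge`
estimates the coefficients by minimizing a penalized least-squares criterion
of the form ``||X w - y||^2 + alpha * ||w||^2``, so large alpha shrinks the
weights towards zero while small alpha recovers the ordinary least squares
fit, which is what the path plotted below illustrates.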
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
coefs = []
for a in alphas:
ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
ridge.fit(X, y)
coefs.append(ridge.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
FederatedAI/FATE | python/federatedml/feature/feature_scale/test/min_max_scale_test.py | 1 | 17320 | import copy
import time
import unittest
import numpy as np
from fate_arch.session import computing_session as session
from sklearn.preprocessing import MinMaxScaler as MMS
from federatedml.feature.feature_scale.min_max_scale import MinMaxScale
from federatedml.feature.instance import Instance
from federatedml.param.scale_param import ScaleParam
from federatedml.util.param_extract import ParamExtract
class TestMinMaxScaler(unittest.TestCase):
def setUp(self):
self.test_data = [
[0, 1, 10, 2, 3, 1],
[1, 2, 9, 2, 4, 2],
[0, 3, 8, 3, 3, 3],
[1, 4, 7, 4, 4, 4],
[1, 5, 6, 5, 5, 5],
[1, 6, 5, 6, 6, -100],
[0, 7, 4, 7, 7, 7],
[0, 8, 3, 8, 6, 8],
[0, 9, 2, 9, 9, 9],
[0, 10, 1, 10, 10, 10]
]
str_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
self.test_instance = []
for td in self.test_data:
# self.test_instance.append(Instance(features=td))
self.test_instance.append(Instance(features=np.array(td, dtype=float)))
session.init(str_time)
self.table_instance = self.data_to_table(self.test_instance)
self.table_instance.schema['header'] = ["fid" + str(i) for i in range(len(self.test_data[0]))]
def print_table(self, table):
for v in (list(table.collect())):
print("id:{}, value:{}".format(v[0], v[1].features))
def data_to_table(self, data, partition=1):
data_table = session.parallelize(data, include_key=False, partition=partition)
return data_table
def sklearn_attribute_format(self, scaler, feature_range):
format_output = []
for i in range(scaler.data_min_.shape[0]):
col_transform_value = (scaler.data_min_[i], scaler.data_max_[i])
format_output.append(col_transform_value)
return format_output
def get_table_instance_feature(self, table_instance):
res_list = []
for k, v in list(table_instance.collect()):
res_list.append(list(v.features))
return res_list
def get_scale_param(self):
component_param = {
"method": "standard_scale",
"mode": "normal",
"scale_col_indexes": []
}
scale_param = ScaleParam()
param_extracter = ParamExtract()
param_extracter.parse_param_from_config(scale_param, component_param)
print("scale_param:{}".format(type(scale_param)))
return scale_param
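    # A minimal sketch of the behaviour the assertions below rely on (inferred from these tests,
    # not from the FATE API docs): each selected feature column is mapped to [0, 1] via
    #     x_scaled = (x - col_min) / (col_max - col_min)
    # where, in "normal" mode, values are first clipped to [feat_lower, feat_upper] before
    # col_min/col_max are computed, and in "cap" mode feat_lower/feat_upper appear to act as
    # fractional (quantile-like) caps on each column. Results are compared against sklearn's
    # MinMaxScaler fitted on the same clipped data.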
# test with (mode='normal', area='all', feat_upper=None, feat_lower=None)
def test_fit_instance_default(self):
scale_param = self.get_scale_param()
scale_param.scale_col_indexes = -1
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
scaler = MMS()
scaler.fit(self.test_data)
self.assertListEqual(np.round(self.get_table_instance_feature(fit_instance), 6).tolist(),
np.around(scaler.transform(self.test_data), 6).tolist())
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="all", upper=2, lower=1):
def test_fit1(self):
scale_param = self.get_scale_param()
scale_param.scale_column_idx = []
scale_param.feat_upper = 2
scale_param.feat_lower = 1
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if value > 2:
self.test_data[i][j] = 2
elif value < 1:
self.test_data[i][j] = 1
scaler = MMS()
scaler.fit(self.test_data)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
np.around(scaler.transform(self.test_data), 6).tolist())
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="all", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit2(self):
scale_param = self.get_scale_param()
scale_param.scale_column_idx = []
scale_param.feat_upper = [2, 2, 2, 2, 2, 2]
scale_param.feat_lower = [1, 1, 1, 1, 1, 1]
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if value > 2:
self.test_data[i][j] = 2
elif value < 1:
self.test_data[i][j] = 1
scaler = MMS()
scaler.fit(self.test_data)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
np.around(scaler.transform(self.test_data), 6).tolist())
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit3(self):
scale_column_idx = [1, 2, 4]
scale_param = self.get_scale_param()
# scale_param.area = "col"
scale_param.feat_upper = [2, 2, 2, 2, 2, 2]
scale_param.feat_lower = [1, 1, 1, 1, 1, 1]
scale_param.scale_col_indexes = scale_column_idx
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
raw_data = copy.deepcopy(self.test_data)
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if j in scale_column_idx:
if value > 2:
self.test_data[i][j] = 2
elif value < 1:
self.test_data[i][j] = 1
scaler = MMS()
scaler.fit(self.test_data)
sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist()
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data)
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit4(self):
scale_column_idx = [1, 2, 4]
scale_param = self.get_scale_param()
# scale_param.area = "col"
scale_param.feat_upper = 2
scale_param.feat_lower = 1
scale_param.scale_col_indexes = scale_column_idx
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
raw_data = copy.deepcopy(self.test_data)
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if j in scale_column_idx:
if value > 2:
self.test_data[i][j] = 2
elif value < 1:
self.test_data[i][j] = 1
scaler = MMS()
scaler.fit(self.test_data)
sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist()
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data)
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit5(self):
scale_column_idx = [1, 2, 4]
scale_param = self.get_scale_param()
scale_param.mode = "cap"
# scale_param.area = "col"
scale_param.feat_upper = 0.8
scale_param.feat_lower = 0.2
scale_param.scale_col_indexes = scale_column_idx
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
raw_data = copy.deepcopy(self.test_data)
gt_cap_lower_list = [0, 2, 2, 2, 3, 1]
gt_cap_upper_list = [1, 8, 8, 8, 7, 8]
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if value > gt_cap_upper_list[j]:
self.test_data[i][j] = gt_cap_upper_list[j]
elif value < gt_cap_lower_list[j]:
self.test_data[i][j] = gt_cap_lower_list[j]
scaler = MMS()
scaler.fit(self.test_data)
sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist()
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data)
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit5(self):
scale_column_idx = [1, 2, 4]
scale_names = ['fid1', 'fid2', 'fid4', 'fid1000']
scale_param = self.get_scale_param()
scale_param.mode = "cap"
# scale_param.area = "col"
scale_param.feat_upper = 0.8
scale_param.feat_lower = 0.2
scale_param.scale_names = scale_names
scale_param.scale_col_indexes = []
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
raw_data = copy.deepcopy(self.test_data)
gt_cap_lower_list = [0, 2, 2, 2, 3, 1]
gt_cap_upper_list = [1, 8, 8, 8, 7, 8]
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if value > gt_cap_upper_list[j]:
self.test_data[i][j] = gt_cap_upper_list[j]
elif value < gt_cap_lower_list[j]:
self.test_data[i][j] = gt_cap_lower_list[j]
scaler = MMS()
scaler.fit(self.test_data)
sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist()
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data)
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
# test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]):
def test_fit5(self):
scale_column_idx = [1, 2, 4]
scale_names = ['fid1', 'fid2', 'fid1000']
scale_param = self.get_scale_param()
scale_param.mode = "cap"
# scale_param.area = "col"
scale_param.feat_upper = 0.8
scale_param.feat_lower = 0.2
scale_param.scale_names = scale_names
scale_param.scale_col_indexes = [2, 4]
scale_obj = MinMaxScale(scale_param)
fit_instance = scale_obj.fit(self.table_instance)
column_min_value = scale_obj.column_min_value
column_max_value = scale_obj.column_max_value
raw_data = copy.deepcopy(self.test_data)
gt_cap_lower_list = [0, 2, 2, 2, 3, 1]
gt_cap_upper_list = [1, 8, 8, 8, 7, 8]
for i, line in enumerate(self.test_data):
for j, value in enumerate(line):
if value > gt_cap_upper_list[j]:
self.test_data[i][j] = gt_cap_upper_list[j]
elif value < gt_cap_lower_list[j]:
self.test_data[i][j] = gt_cap_lower_list[j]
scaler = MMS()
scaler.fit(self.test_data)
sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist()
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
fit_data = np.round(self.get_table_instance_feature(fit_instance), 6).tolist()
self.assertListEqual(fit_data, sklearn_transform_data)
for i, line in enumerate(sklearn_transform_data):
for j, cols in enumerate(line):
if j not in scale_column_idx:
sklearn_transform_data[i][j] = raw_data[i][j]
data_min = list(scaler.data_min_)
data_max = list(scaler.data_max_)
self.assertListEqual(column_min_value, data_min)
self.assertListEqual(column_max_value, data_max)
transform_data = scale_obj.transform(self.table_instance)
self.assertListEqual(self.get_table_instance_feature(fit_instance),
self.get_table_instance_feature(transform_data))
def tearDown(self):
session.stop()
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using a dictionary learned online with
:ref:`DictionaryLearning` and various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
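As a rough sketch of what "non-zero coefficients" means here, the OMP
transform approximately minimizes ``||x - D z||^2`` for each patch ``x`` and
dictionary ``D``, subject to ``z`` having at most ``transform_n_nonzero_coefs``
non-zero entries; keeping two atoms therefore allows a slightly richer
reconstruction of each patch than keeping one.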
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
ioana-delaney/spark | python/setup.py | 5 | 10182 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while we are under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
    except OSError:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
evgchz/scikit-learn | examples/svm/plot_svm_nonlinear.py | 61 | 1089 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
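As a reminder (a sketch, with ``gamma`` left at its default here), the RBF
kernel used by the SVC is ``K(x, x') = exp(-gamma * ||x - x'||^2)``, which is
what lets a linear decision function in feature space bend around the XOR
pattern in input space.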
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
dopplershift/MetPy | docs/conf.py | 1 | 12146 | #!/usr/bin/env python3
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import datetime
from pathlib import Path
import re
import sys
import metpy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use resolve() to make it absolute, like shown here.
cwd = Path.cwd().resolve()
sys.path.insert(0, str(cwd))
sys.path.insert(0, str(cwd.parent.parent))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
'matplotlib.sphinxext.plot_directive',
'myst_parser'
]
sphinx_gallery_conf = {
'doc_module': ('metpy',),
'reference_url': {
'metpy': None,
},
'examples_dirs': [str(cwd.parent / 'examples'), str(cwd.parent / 'tutorials')],
'gallery_dirs': ['examples', 'tutorials'],
'filename_pattern': r'\.py',
'backreferences_dir': str(Path('api') / 'generated'),
'default_thumb_file': str(Path('_static') / 'metpy_150x150_white_bg.png'),
'abort_on_example_error': True
}
# Turn off code and image links for embedded mpl plots
plot_html_show_source_link = False
plot_html_show_formats = False
plot_formats = ['png']
plot_rcparams = {'savefig.bbox': 'tight'}
# Set up mapping for other projects' docs
intersphinx_mapping = {
'pint': ('https://pint.readthedocs.io/en/stable/', None),
'matplotlib': ('https://matplotlib.org/stable/', None),
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None)
}
# Tweak how docs are formatted
napoleon_use_rtype = False
# Control main class documentation
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# Controlling automatically generating summary tables in the docs
autosummary_generate = True
autosummary_imported_members = True
# The encoding of source files.
# source_encoding = 'utf-8-sig'
cur_date = datetime.utcnow()
# The main toctree document.
master_doc = 'index'
# General information about the project.
project = 'MetPy'
# noinspection PyShadowingBuiltins
copyright = (f'2008\u2013{cur_date:%Y}, MetPy Developers. '
'Development supported by National Science Foundation grants '
'AGS-1344155, OAC-1740315, and AGS-1901712')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
verinfo = metpy.__version__
parsed_version = re.search(r'(?P<full>(?P<base>\d+\.\d+)\.\d+\w*)', verinfo).groupdict()
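# For example, a hypothetical verinfo of '1.1.0+50.g1234abc' gives base '1.1' and full '1.1.0';
# the '+' suffix is what marks a development build below.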
# The short X.Y version.
version = parsed_version['base']
if '+' in verinfo:
version += 'dev'
# The full version, including alpha/beta/rc tags.
release = parsed_version['full']
rst_prolog = f'''
.. |cite_version| replace:: {release}
.. |cite_year| replace:: {cur_date:%Y}
.. |access_date| replace:: {cur_date:%d %B %Y}
'''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
'github_url': 'https://github.com/Unidata/MetPy',
'twitter_url': 'https://twitter.com/MetPy',
'external_links': [
{'name': 'Release Notes', 'url': 'https://github.com/Unidata/MetPy/releases'},
],
'use_edit_page_button': False,
'google_analytics_id': 'UA-92978945-1',
'search_bar_position': 'navbar',
# 'navbar_align': 'left' # Coming in next release of theme
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {'canonical_url': 'https://unidata.github.io/MetPy/latest/'}
# Extra variables that will be available to the templates. Used to create the
# links to the Github repository sources and issues
html_context = {
'doc_path': 'docs',
'galleries': sphinx_gallery_conf['gallery_dirs'],
'gallery_dir': dict(zip(sphinx_gallery_conf['gallery_dirs'],
sphinx_gallery_conf['examples_dirs'])),
'api_dir': 'api/generated',
'github_user': 'Unidata',
'github_repo': 'MetPy',
'github_version': 'main', # Make changes to the main branch
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = ' '.join((project, version))
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = str(Path('_static') / 'metpy_horizontal.png')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = str(Path('_static') / 'metpy_32x32.ico')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['theme-unidata.css']
html_js_files = ['doc_shared.js']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y at %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MetPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MetPy.tex', 'MetPy Documentation',
'MetPy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'metpy', 'MetPy Documentation',
['MetPy Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MetPy', 'MetPy Documentation',
'MetPy Developers', 'MetPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
linkcheck_ignore = [r'https://www\.youtube\.com/watch\?v=[\d\w\-_]+',
r'https://codecov.io/github/Unidata/MetPy',
r'https://youtu\.be/[\d\w\-_]+',
# AMS DOIs should be stable, but resolved link consistently 403's with linkcheck
r'https://doi.org/10.1175/.*',
# Giving 404s right now and is not going to change
r'https://twitter.com/MetPy']
linkcheck_request_headers = {'https://www.ncdc.noaa.gov/': {'Accept-Encoding': 'deflate'}}
| bsd-3-clause |
poryfly/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
abele/bokeh | bokeh/compat/mplexporter/renderers/vega_renderer.py | 54 | 5284 | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
if notebook=True, then return an object which will display in a notebook
otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
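# A minimal usage sketch (assumes a matplotlib figure is already built; the
# variable names are illustrative only):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2], [0, 1, 4])
#     html_str = fig_to_vega(fig)                 # plain HTML string
#     vega_obj = fig_to_vega(fig, notebook=True)  # renders inline in IPython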
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| bsd-3-clause |
ivano666/tensorflow | tensorflow/contrib/learn/__init__.py | 4 | 1832 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@TensorFlowClassifier
@@TensorFlowDNNClassifier
@@TensorFlowDNNRegressor
@@TensorFlowEstimator
@@TensorFlowLinearClassifier
@@TensorFlowLinearRegressor
@@TensorFlowRNNClassifier
@@TensorFlowRNNRegressor
@@TensorFlowRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
| apache-2.0 |
sibis-platform/ncanda-datacore | scripts/reporting/xnat_sessions_report.py | 2 | 11745 |
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
XNAT Sessions Report
Check for valid scanning sessions and time windows by first caching all the
XNAT session XML files and then parsing these files for necessary info. Note
that to create the XML file cache you need to run with --update
Example
=======
- When running for the first time run
./xnat_sessions_report.py --update
 so that the cache (located at experimentsdir) is created
- Update the cache (stored in experimentsdir) and generate the baseline report
./xnat_sessions_report.py --update --baseline
- Use the existing cache to extract 10 sessions in the followup window
./xnat_sessions_report.py --num_extract 10 --min 180 --max 540
"""
from __future__ import print_function
import os
import sys
import pandas as pd
import sibispy
from sibispy import sibislogger as slog
import xnat_extractor as xe
verbose = None
def get_scan_type_pairs(modality):
"""
Get a dictionary of series description based on modality
:param modality: str (anatomy, diffusion, functional)
:return: dict
"""
scan_type_pairs = dict(scan1=None, scan2=None)
if modality == 'anatomy':
t1_scan_types = ['ncanda-t1spgr-v1', 'ncanda-mprage-v1']
t2_scan_types = ['ncanda-t2fse-v1']
scan_type_pairs.update(scan1=t1_scan_types,
scan2=t2_scan_types)
elif modality == 'diffusion':
print("Has to be updated as check does not include dti30b400 - look in redcap/export_measures")
sys.exit()
pepolar = ['ncanda-dti6b500pepolar-v1']
dwi = ['ncanda-dti60b1000-v1']
scan_type_pairs.update(scan1=pepolar,
scan2=dwi)
elif modality == 'functional':
fmri = ['ncanda-rsfmri-v1']
fieldmap = ['ncanda-grefieldmap-v1']
scan_type_pairs.update(scan1=fmri,
scan2=fieldmap)
return scan_type_pairs
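# For reference, the mapping produced for the anatomy modality (derived
# directly from the branches above) looks like:
#
#     get_scan_type_pairs('anatomy')
#     # -> {'scan1': ['ncanda-t1spgr-v1', 'ncanda-mprage-v1'],
#     #     'scan2': ['ncanda-t2fse-v1']}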
def main(args=None):
# TODO: Handle when T1 and T2 are in separate session (i.e., rescan)
# Upload all data experimentsdir
if args.update:
slog.init_log(False, False,'xnat_sesions_report', 'xnat_sesions_report',None)
session = sibispy.Session()
session.configure()
if not session.configure() :
if verbose:
print("Error: session configure file was not found")
sys.exit()
server = session.connect_server('xnat_http', True)
if not server:
print("Error: could not connect to xnat server!")
sys.exit()
xe.extract_experiment_xml(session,args.experimentsdir, args.num_extract)
# extract info from the experiment XML files
experiment = xe.get_experiments_dir_info(args.experimentsdir)
# Scan specific information
scan = xe.get_experiments_dir_scan_info(args.experimentsdir)
# Session info
reading = xe.get_experiments_dir_reading_info(args.experimentsdir)
df = xe.merge_experiments_scans_reading(experiment, scan, reading)
# exclude phantoms, including the traveling human phantoms
site_id_pattern = '[A-E]-[0-9]{5}-[MF]-[0-9]'
df = df[df.site_id.str.contains(site_id_pattern)]
# exclude subjects not part of study
df = df[df['subject_id'] != 'NCANDA_S00127']
if args.unknown :
print("Sessions that have not yet been quality controlled")
scanCheckList = pd.DataFrame()
        required_scans = ['ncanda-mprage-v1', 'ncanda-t1spgr-v1',
                          'ncanda-t2fse-v1', 'ncanda-dti6b500pepolar-v1',
                          'ncanda-dti30b400-v1', 'ncanda-dti60b1000-v1',
                          'ncanda-grefieldmap-v1', 'ncanda-rsfmri-v1']
for eid in df.experiment_id.drop_duplicates():
eid_df = df[df.experiment_id == eid]
eid_df = eid_df[~pd.isnull(eid_df['quality'])]
if not len(eid_df[eid_df['quality'] != 'unknown']) :
print(eid)
else :
unknownScanDF = eid_df[eid_df['quality'] == 'unknown']
mandatoryCheck = unknownScanDF[unknownScanDF['scan_type'].isin(required_scans)]
if len(mandatoryCheck) :
scanCheckList = scanCheckList.append(mandatoryCheck)
print(" ")
print("Mandatory scans that have not yet been quality controlled (status unknown)")
if len(scanCheckList) :
pd.set_option('display.max_rows', len(scanCheckList))
print(scanCheckList['scan_type'])
sys.exit()
if args.ignore_window or args.session_notes or args.scan_notes :
if args.usable :
df = df[df['quality'] == 'usable']
columns = ['site_id', 'subject_id', 'experiment_id', 'experiment_date','excludefromanalysis']
if args.ignore_window or args.scan_notes :
columns = columns + ['scan_id', 'scan_type', 'quality']
if args.scan_notes :
columns = columns + [ 'scan_note']
if args.session_notes :
columns = columns + [ 'note' ]
result = df[columns]
# print result
else :
df.loc[:, 'experiment_date'] = df.experiment_date.astype('datetime64')
result = pd.DataFrame()
for subject_id in df.subject_id.drop_duplicates():
subject_df = df[df.subject_id == subject_id]
# find the earliest exam date for each given subject
grouping = subject_df.groupby('subject_id')
baseline_date = grouping['experiment_date'].nsmallest(1)
baseline_df = subject_df[subject_df.experiment_date == baseline_date[0]]
# Find window for follow-up
day_min = pd.datetools.Day(n=args.min)
day_max = pd.datetools.Day(n=args.max)
followup_min = baseline_df.experiment_date + day_min
followup_max = baseline_df.experiment_date + day_max
df_min = subject_df.experiment_date > followup_min[0]
df_max = subject_df.experiment_date < followup_max[0]
followup_df = subject_df[df_min & df_max]
# Included followup sessions slightly outside window
included = ['NCANDA_E02615', 'NCANDA_E02860']
included_df = subject_df[subject_df.experiment_id.isin(included)]
if included_df.shape[0]:
followup_df = included_df
# Create report for baseline visit
if args.baseline:
followup_df = baseline_df
# filter for specific scan types
scan_type_pairs = get_scan_type_pairs(args.modality)
scan1 = scan_type_pairs.get('scan1')
scan2 = scan_type_pairs.get('scan2')
scan1_df = followup_df[followup_df.scan_type.isin(scan1)]
scan2_df = followup_df[followup_df.scan_type.isin(scan2)]
# Filter quality column
if args.usable :
scan1_selected = scan1_df[scan1_df.quality == 'usable']
scan2_selected = scan2_df[scan2_df.quality == 'usable']
else :
scan1_selected = scan1_df
scan2_selected = scan2_df
# report columns
columns = ['site_id', 'subject_id', 'experiment_id', 'experiment_date',
'excludefromanalysis', 'note', 'scan_type', 'quality',
'scan_note']
scan1_recs = scan1_selected.loc[:, columns].to_records(index=False)
scan2_recs = scan2_selected.loc[:, columns].to_records(index=False)
scan1_report = pd.DataFrame(scan1_recs,
index=scan1_selected.experiment_id)
scan2_report = pd.DataFrame(scan2_recs,
index=scan2_selected.experiment_id)
scan1_scan2_report = scan1_report.join(scan2_report[['scan_type',
'quality',
'scan_note']],
lsuffix='_scan1',
rsuffix='_scan2',
how='inner')
if scan1_scan2_report.shape[0]:
result = result.append(scan1_scan2_report)
#
# Write out results
#
# Remove any duplicate rows due to extra usable scan types (i.e., fieldmaps)
result = result.drop_duplicates()
result.to_csv(args.outfile, index=False)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(prog='xnat_sessions_report.py',
description=__doc__)
parser.add_argument('-c', '--config',
type=str,
default=os.path.join(os.path.expanduser('~'),
'.server_config', 'ncanda.cfg'))
parser.add_argument('-b', '--baseline',
action='store_true',
help='Create report for baseline visit.')
parser.add_argument('-e', '--experimentsdir',
type=str,
default='/tmp/experiments',
help='Name of experiments xml directory')
parser.add_argument('-m', '--modality',
type=str,
default='anatomy',
choices=['anatomy', 'diffusion', 'functional'],
help='Name of experiments xml directory')
parser.add_argument('--min',
type=int,
default=180,
help='Minimum days from baseline (to specify followup '
'1y, only impacts final report but not -u option)')
parser.add_argument('--max',
type=int,
default=540,
help='Maximum days from baseline (to specify followup '
'1y, only impacts final report but not -u option)')
parser.add_argument('--ignore-window',
action='store_true',
help='Just list sessions regardless of window')
parser.add_argument('--usable',
action='store_true',
help='Only list scans with usable image quality')
parser.add_argument('--unknown',
action='store_true',
help='Only list sessions that have unknown scans, i.e. have not been reviewed')
parser.add_argument('--session-notes',
action='store_true',
help='create report with session notes')
parser.add_argument('--scan-notes',
action='store_true',
help='include scan notes in the report')
parser.add_argument('-o', '--outfile',
type=str,
default='/tmp/usability_report.csv',
help='Name of csv file to write.')
parser.add_argument('-n', '--num_extract',
type=int,
help='Number of sessions to extract (only works in '
'connection with -u)')
parser.add_argument('-u', '--update',
action='store_true',
help='Update the cache of xml files')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Print verbose output.')
argv = parser.parse_args()
verbose = argv.verbose
xe.verbose = argv.verbose
sys.exit(main(args=argv))
| bsd-3-clause |
krez13/scikit-learn | examples/manifold/plot_mds.py | 45 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
iaroslav-ai/noxer | noxer/gm/vae.py | 2 | 4553 | '''This script demonstrates how to build a variational autoencoder with Keras.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from .base import GeneratorBase
from sklearn.base import BaseEstimator, TransformerMixin
from keras.layers import Input, Dense, Lambda, Layer
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.optimizers import Adam
from keras.layers.merge import Concatenate
from noxer.rnn import make_keras_picklable
make_keras_picklable()
class VaeGenerator(GeneratorBase):
def __init__(self, latent_dim=2, intermediate_dim=256,
batch_size=100, epochs=50, D=1.0, epsilon_std=1.0,
lr=0.001, beta_1=0.9, beta_2=0.999):
super(VaeGenerator, self).__init__()
self.latent_dim = latent_dim
self.batch_size = batch_size
self.epochs = epochs
self.intermediate_dim = intermediate_dim
self.D = D
self.epsilon_std = epsilon_std
self.lr = lr
self.beta_1 = beta_1
self.beta_2 = beta_2
def fit(self, X, Y, **kwargs):
self.intermediate_dim = int(self.intermediate_dim)
condition_dim = X.shape[-1]
original_dim = Y.shape[-1]
self.condition_dim = condition_dim
Y_smpl = Input(shape=(original_dim,))
X_cond = Input(shape=(condition_dim,))
R_norm = Input(shape=(self.latent_dim,))
YX_conc = Concatenate()([Y_smpl, X_cond])
h = Dense(self.intermediate_dim, activation='relu')(YX_conc)
z_mean = Dense(self.latent_dim)(h)
z_log_var = Dense(self.latent_dim)(h)
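        # The helper below implements the reparameterization trick: a latent
        # sample is written as mean + std * epsilon, where epsilon is the
        # externally supplied standard-normal noise (R_norm), so gradients
        # can flow through z_mean and z_log_var.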
def sampling(args):
z_mean, z_log_var, epsilon = args
return z_mean + K.exp(z_log_var / 2) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
latent_g = Lambda(sampling, output_shape=(self.latent_dim,))([z_mean, z_log_var, R_norm])
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(self.intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='linear')
zx = Concatenate()([latent_g, X_cond])
h_decoded = decoder_h(zx)
y_decoded_mean = decoder_mean(h_decoded)
vae_determinizm = self.D
# Custom loss layer
class VariationalLossLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(VariationalLossLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean):
xent_loss = metrics.mean_squared_error(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss * vae_determinizm + kl_loss)
def call(self, inputs):
x, x_decoded_mean, condition = inputs
loss = self.vae_loss(x, x_decoded_mean)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return x
y = VariationalLossLayer()([Y_smpl, y_decoded_mean, X_cond])
vae = Model([R_norm, X_cond, Y_smpl], y)
vae.compile(
optimizer=Adam(
lr=self.lr, beta_1=self.beta_1, beta_2=self.beta_2
),
loss=None
)
for i in range(self.epochs):
R = self._generate_noise(len(X))
vae.fit([R, X, Y],
shuffle=True,
epochs=1,
batch_size=self.batch_size,
verbose=0)
# build a model to project inputs on the latent space
self.encoder = Model([Y_smpl, X_cond], z_mean)
# build a digit generator that can sample from the learned distribution
decoder_latent = Input(shape=(self.latent_dim,))
decoder_condit = Input(shape=(self.condition_dim,))
zx = Concatenate()([decoder_condit, decoder_latent])
_h_decoded = decoder_h(zx)
_x_decoded_mean = decoder_mean(_h_decoded)
self.generator = Model([decoder_condit, decoder_latent], _x_decoded_mean)
def _generate_noise(self, N):
return np.random.randn(N, self.latent_dim)
def predict(self, X, *args, **kwargs):
lat = self._generate_noise(len(X))
Yp = self.generator.predict([X, lat], verbose=0)
return Yp
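# A minimal usage sketch (hypothetical array shapes, for illustration only):
#
#     gen = VaeGenerator(latent_dim=2, epochs=10)
#     gen.fit(X_cond, Y_obs)               # X_cond: (N, c), Y_obs: (N, d)
#     Y_sampled = gen.predict(X_cond[:5])  # 5 samples drawn from the decoder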
| mit |
kaushik94/pymc | pymc/examples/stochastic_volatility.py | 2 | 4664 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from matplotlib.pylab import *
import numpy as np
from pymc import *
from pymc.distributions.timeseries import *
from scipy.sparse import csc_matrix
from scipy import optimize
# <markdowncell>
# Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
#
# $$ \sigma \sim Exponential(50) $$
#
# $$ \nu \sim Exponential(.1) $$
#
# $$ s_i \sim Normal(s_{i-1}, \sigma^{-2}) $$
#
# $$ log(\frac{y_i}{y_{i-1}}) \sim t(\nu, 0, exp(-2 s_i)) $$
#
# Here, $y$ is the daily return series and $s$ is the latent log
# volatility process.
# <markdowncell>
# ## Build Model
# <markdowncell>
# First we load some daily returns of the S&P 500.
# <codecell>
n = 400
returns = np.genfromtxt(get_data_file('pymc.examples', "data/SP500.csv"))[-n:]
returns[:5]
# <markdowncell>
# Specifying the model in pymc mirrors its statistical specification.
#
# However, it is easier to sample the scale of the log volatility process innovations, $\sigma$, on a log scale, so we create it using `TransformedVar` and use `logtransform`. `TransformedVar` creates one variable in the transformed space and one in the normal space. The one in the transformed space (here $\text{log}(\sigma) $) is the one over which sampling will occur, and the one in the normal space is the one to use throughout the rest of the model.
#
# It takes a variable name, a distribution and a transformation to use.
# <codecell>
model = Model()
with model:
sigma, log_sigma = model.TransformedVar(
'sigma', Exponential.dist(1. / .02, testval=.1),
logtransform)
nu = Exponential('nu', 1. / 10)
s = GaussianRandomWalk('s', sigma ** -2, shape=n)
r = T('r', nu, lam=exp(-2 * s), observed=returns)
# <markdowncell>
# ## Fit Model
#
# To get a decent scaling matrix for the Hamiltonian sampler, we find the Hessian at a point. The method `Model.d2logpc` gives us a `Theano` compiled function that returns the matrix of 2nd derivatives.
#
# However, the 2nd derivatives for the degrees of freedom parameter, `nu`, are negative and thus not very informative and make the matrix non-positive definite, so we replace that entry with a reasonable guess at the scale. The interactions between `log_sigma`/`nu` and `s` are also not very useful, so we set them to zero.
#
# The Hessian matrix is also sparse, so we can get faster sampling by
# using a sparse scaling matrix. If you have `scikits.sparse` installed,
# convert the Hessian to a csc matrixs by uncommenting the appropriate
# line below.
# <codecell>
H = model.fastd2logp()
def hessian(point, nusd):
h = H(Point(point))
h[1, 1] = nusd ** -2
h[:2, 2:] = h[2:, :2] = 0
# h = csc_matrix(h)
return h
# <markdowncell>
# For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. However, if we fix `log_sigma` and `nu` it is no longer degenerate, so we find the MAP with respect to the volatility process, 's', keeping `log_sigma` and `nu` constant at their default values.
#
# We use L-BFGS because it is more efficient for high dimensional
# functions (`s` has n elements).
# <codecell>
with model:
start = find_MAP(vars=[s], fmin=optimize.fmin_l_bfgs_b)
# <markdowncell>
# We do a short initial run to get near the right area, then start again
# using a new Hessian at the new starting point to get faster sampling due
# to better scaling. We do a short run since this is an interactive
# example.
# <codecell>
with model:
step = NUTS(model.vars, hessian(start, 6))
def run(n=2000):
if n == "short":
n = 50
with model:
trace = sample(5, step, start, trace=model.vars + [sigma])
# Start next run at the last sampled position.
start2 = trace.point(-1)
step2 = HamiltonianMC(model.vars, hessian(start2, 6), path_length=4.)
trace = sample(n, step2, trace=trace)
# <codecell>
# figsize(12,6)
title(str(s))
plot(trace[s][::10].T, 'b', alpha=.03)
xlabel('time')
ylabel('log volatility')
# figsize(12,6)
traceplot(trace, model.vars[:-1])
if __name__ == '__main__':
run()
# <markdowncell>
# ## References
#
# 1. Hoffman & Gelman. (2011). [The No-U-Turn Sampler: Adaptively Setting
# Path Lengths in Hamiltonian Monte
# Carlo](http://arxiv.org/abs/1111.4246).
| apache-2.0 |
giorgiop/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# Generate the training data
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
cython-testbed/pandas | asv_bench/benchmarks/algorithms.py | 1 | 3331 | import warnings
from importlib import import_module
import numpy as np
import pandas as pd
from pandas.util import testing as tm
for imp in ['pandas.util', 'pandas.tools.hashing']:
try:
hashing = import_module(imp)
break
except (ImportError, TypeError, ValueError):
pass
class Factorize(object):
goal_time = 0.2
params = [True, False]
param_names = ['sort']
def setup(self, sort):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_factorize_int(self, sort):
self.int_idx.factorize(sort=sort)
def time_factorize_float(self, sort):
self.float_idx.factorize(sort=sort)
def time_factorize_string(self, sort):
self.string_idx.factorize(sort=sort)
class Duplicated(object):
goal_time = 0.2
params = ['first', 'last', False]
param_names = ['keep']
def setup(self, keep):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_duplicated_int(self, keep):
self.int_idx.duplicated(keep=keep)
def time_duplicated_float(self, keep):
self.float_idx.duplicated(keep=keep)
def time_duplicated_string(self, keep):
self.string_idx.duplicated(keep=keep)
class DuplicatedUniqueIndex(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.idx_int_dup = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.idx_int_dup.is_unique
def time_duplicated_unique_int(self):
self.idx_int_dup.duplicated()
class Match(object):
goal_time = 0.2
def setup(self):
self.uniques = tm.makeStringIndex(1000).values
self.all = self.uniques.repeat(10)
def time_match_string(self):
with warnings.catch_warnings(record=True):
pd.match(self.all, self.uniques)
class Hashing(object):
goal_time = 0.2
def setup_cache(self):
N = 10**5
df = pd.DataFrame(
{'strings': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'floats': np.random.randn(N),
'ints': np.arange(N),
'dates': pd.date_range('20110101', freq='s', periods=N),
'timedeltas': pd.timedelta_range('1 day', freq='s', periods=N)})
df['categories'] = df['strings'].astype('category')
df.iloc[10:20] = np.nan
return df
def time_frame(self, df):
hashing.hash_pandas_object(df)
def time_series_int(self, df):
hashing.hash_pandas_object(df['ints'])
def time_series_string(self, df):
hashing.hash_pandas_object(df['strings'])
def time_series_float(self, df):
hashing.hash_pandas_object(df['floats'])
def time_series_categorical(self, df):
hashing.hash_pandas_object(df['categories'])
def time_series_timedeltas(self, df):
hashing.hash_pandas_object(df['timedeltas'])
def time_series_dates(self, df):
hashing.hash_pandas_object(df['dates'])
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
dflemin3/diskpy | diskpy/plot/_plot.py | 1 | 3082 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 15:15:23 2015
@author: ibackus
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import copy
def gridplot(nrows, ncols=1, square=False):
"""
Creates a grid of tightly-packed subplots and returns them as a numpy array,
shape (nrows,ncols). If nrows=ncols=1, a single subplot is made.
Currently not fully implemented for a 2D array
"""
# Just make a single subplot
if (nrows == 1) & (ncols == 1):
return plt.subplot(1,1,1)
# Create a grid
grid = mpl.gridspec.GridSpec(nrows,ncols)
grid.update(wspace=0., hspace=0.)
# Initialize subplots
ax = np.zeros((nrows,ncols), dtype=object)
counter = 0
for i in range(nrows):
for j in range(ncols):
if i > 0:
sharex = ax[0,j]
else:
sharex = None
if j > 0:
sharey = ax[i,0]
else:
sharey = None
ax[i,j] = plt.subplot(grid[counter], sharex = sharex, sharey = sharey)
if square:
ax[i,j].set(adjustable='box-forced', aspect='equal')
counter += 1
# Remove ticklabels inbetween plots
for i in range(nrows-1):
for j in range(ncols):
plt.setp(ax[i,j].get_xticklabels(), visible=False)
for i in range(nrows):
for j in range(1,ncols):
plt.setp(ax[i,j].get_yticklabels(), visible=False)
# If this is a 1-D grid, flatten ax
if (ncols == 1) or (nrows == 1):
ax = ax.flatten()
return ax
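# A minimal usage sketch (illustrative only):
#
#     ax = gridplot(2, 2)       # 2x2 numpy array of tightly-packed Axes
#     ax[0, 0].plot(range(10))
#     ax1d = gridplot(3)        # ncols defaults to 1 -> flattened, length 3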
def heatmap(x, y, z, bins=10, plot=True, output=False):
"""
Creates a pcolor heatmap for z evaluated at (x,y). z is binned and
averaged according to x and y. x, y, and z should be 1-D arrays with the
same length.
    IF bins = N, z is binned on an (N,N) grid
    IF bins = (M,N) [a tuple], z is binned on an (M,N) grid
    IF plot = True (default) a pcolor heatmap of the binned z is plotted.
*** RETURNS ***
IF output = False, nothing is returned (default)
IF output = True:
Returns x_mesh, y_mesh, z_binned
x_mesh, y_mesh are the meshgrid x,y edges z is evaluted in. z_binned is
the average of z for each bin.
"""
N, x_binedges, y_binedges = np.histogram2d(x, y, bins = bins)
x_ind = np.digitize(x, x_binedges) - 1
y_ind = np.digitize(y, y_binedges) - 1
nx_bins = len(x_binedges) - 1
ny_bins = len(y_binedges) - 1
z_binned = np.zeros([nx_bins, ny_bins])
for i in range(nx_bins):
for j in range(ny_bins):
z_binned[i,j] = z[(x_ind==i) & (y_ind==j)].mean()
x_mesh, y_mesh = np.meshgrid(x_binedges, y_binedges, indexing = 'ij')
if plot:
cmap = copy.copy(mpl.cm.jet)
cmap.set_bad('w',1.)
masked_z = np.ma.array(z_binned, mask=np.isnan(z_binned))
plt.pcolormesh(x_mesh, y_mesh, masked_z, cmap = cmap)
plt.colorbar()
if output:
return x_mesh, y_mesh, z_binned | mit |
cbmoore/statsmodels | statsmodels/examples/tsa/ex_arma_all.py | 34 | 1982 |
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)
#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
from statsmodels.miscmodels.tmodel import TArma
modct = TArma(x)
reslst = modc.fit(order=(1,1))
print(reslst[0])
rescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,
maxfun=500)
print(rescmt.params)
from statsmodels.tsa.arima_model import ARMA
mkf = ARMA(x)
##rkf = mkf.fit((1,1))
##rkf.params
rkf = mkf.fit((1,1), trend='nc')
print(rkf.params)
from statsmodels.tsa.arima_process import arma_generate_sample
np.random.seed(12345)
y_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)
##arma22 = ARMA(y_arma22)
##res22 = arma22.fit(trend = 'nc', order=(2,2))
##print 'kf ',res22.params
##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))
##print 'css', res22css.params
mod22 = Arma(y_arma22)
resls22 = mod22.fit(order=(2,2))
print('ls ', resls22[0])
resmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)
print('mle', resmle22.params)
f = mod22.forecast()
f3 = mod22.forecast3(start=900)[-20:]
print(y_arma22[-10:])
print(f[-20:])
print(f3[-109:-90])
plt.show() | bsd-3-clause |
voxlol/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
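# A minimal sketch of a custom partitioner built on _PartitionIterator
# (hypothetical, for illustration only): hold out the first `p` samples as a
# single test split; the base class derives the complementary training set.
#
#     class _FirstPBlockOut(_PartitionIterator):
#         def __init__(self, n, p):
#             super(_FirstPBlockOut, self).__init__(n)
#             self.p = p
#
#         def _iter_test_indices(self):
#             yield np.arange(self.p)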
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used a validation set once while the k - 1 remaining
fold form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one
    contains the remaining samples.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
    that are all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
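# Editor's sketch (assumed, not in the original file): __len__ above is the
# binomial coefficient C(n_unique_labels, p); cross-check it against an explicit
# enumeration of label pairs. The label values are arbitrary examples.
def _leave_p_label_out_len_demo():
    from itertools import combinations
    lpl = LeavePLabelOut(labels=[1, 1, 2, 3, 3, 4], p=2)
    # 4 unique labels taken 2 at a time -> 6 test sets
    assert len(lpl) == len(list(combinations(range(4), 2))) == 6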
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
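# Editor's sketch (hypothetical helper, not in the original module): how the
# private validator above resolves fractional and absolute sizes; the numbers
# are illustrative only.
def _validate_shuffle_split_demo():
    # 25% of 10 samples -> ceil(2.5) = 3 test samples, the rest for training.
    assert _validate_shuffle_split(10, test_size=0.25, train_size=None) == (7, 3)
    # Absolute sizes are passed through unchanged.
    assert _validate_shuffle_split(10, test_size=2, train_size=5) == (5, 2)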
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the split by randomly assigning the missing indices.
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
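# Editor's sketch (not part of the original module): typical use of
# cross_val_predict; the estimator and dataset choices are assumptions made
# only for illustration.
def _cross_val_predict_demo():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    preds = cross_val_predict(LogisticRegression(), iris.data, iris.target, cv=3)
    # one out-of-fold prediction per input sample
    assert preds.shape == (iris.data.shape[0],)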
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
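# Editor's sketch (assumed, not in the original file): the partition check above
# accepts any reordering of arange(n) and rejects index arrays with gaps.
def _check_is_partition_demo():
    assert _check_is_partition(np.array([2, 0, 1]), 3)
    assert not _check_is_partition(np.array([0, 0, 1]), 3)  # index 2 is missing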
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
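# Editor's sketch (not part of the original module): typical use of
# cross_val_score; the SVC/iris choices are illustrative assumptions.
def _cross_val_score_demo():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    scores = cross_val_score(SVC(kernel='linear', C=1), iris.data, iris.target,
                             cv=5)
    # one score per fold
    assert scores.shape == (5,)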
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
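# Editor's sketch (hypothetical, not in the original file): when labels are
# given, _shuffle only permutes y within each label group, so group membership
# is preserved. The toy values are illustrative.
def _shuffle_demo():
    rng = np.random.RandomState(0)
    y = np.array([10, 11, 20, 21])
    labels = np.array([0, 0, 1, 1])
    y_shuffled = _shuffle(y, labels, rng)
    assert set(y_shuffled[:2]) == set([10, 11])
    assert set(y_shuffled[2:]) == set([20, 21])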
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold;
        None, in which case 3-fold cross-validation is used; or another
        object, which will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
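# Editor's sketch (not part of the original module): check_cv picks a stratified
# strategy for classifiers and plain KFold otherwise; the toy arrays are
# illustrative assumptions.
def _check_cv_demo():
    X = np.zeros((6, 2))
    y = np.array([0, 0, 0, 1, 1, 1])
    assert isinstance(check_cv(3, X, y, classifier=True), StratifiedKFold)
    assert isinstance(check_cv(3, X, y, classifier=False), KFold)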
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
        The returned value equals the p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
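# Editor's sketch (not part of the original module): typical use of
# permutation_test_score; the estimator/dataset and the small n_permutations
# are illustrative assumptions chosen to keep the run short.
def _permutation_test_score_demo():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target, cv=3, n_permutations=10)
    assert perm_scores.shape == (10,)
    assert 0.0 < pvalue <= 1.0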
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
mojoboss/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/draw/legend.py | 1 | 3200 | #!/usr/bin/env python
from __future__ import division
from cogent.core import moltype, annotation
from matplotlib.collections import PatchCollection
from matplotlib.text import Text
from matplotlib.transforms import Affine2D
from cogent.draw.rlg2mpl import Group, Drawable, figureLayout
from cogent.draw.linear import Display, DisplayPolicy
__author__ = "Peter Maxwell and Gavin Huttley"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Gavin Huttley", "Peter Maxwell", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
class Legend(Drawable):
"""A class for drawing a legend for a display policy
Arguments:
- policy: a reference to a Display policy class"""
def __init__(self, policy = DisplayPolicy):
self.policy = policy
def _makeSampleSequence(self, feature_type):
seq = moltype.DNA.makeSequence('aaaccggttt' * 7)
v = seq.addAnnotation(annotation.Feature,
feature_type, feature_type, [(2,3)])
v = seq.addAnnotation(annotation.Feature,
feature_type, feature_type, [(7,18)])
v = seq.addAnnotation(annotation.Feature,
feature_type, feature_type, [(20,70)])
return seq
def populateAxes(self, ax, columns = 3):
""" Returns the legend as a matplotlib artist
Arguments:
- columns: the number of columns of feature / representation
pairs
"""
ax.set_xlim(0, 600)
ax.set_ylim(-800, 50)
result = []
x = y = 0
for track in self.policy()._makeTrackDefns():
if track.tag is None or track.tag=="Graphs":
continue
ax.text(10, y*30, track.tag)
y -= 1
for feature in track:
seq = self._makeSampleSequence(feature)
display = Display(seq,
policy = self.policy,
min_feature_height = 10,
show_code = False,
pad = 0,)
sample = display.makeArtist()
#trans = sample.get_transform()
#offset = Affine2D()
#offset.translate(x*600+20 / columns, y*30)
sample.translate(x*600/columns+10, y*30)
ax.add_artist(sample)
ax.text(x*600/columns+90, y*30, feature)
x += 1
if x % columns == 0:
x = 0
y -= 1
if x:
x = 0
y -= 1
ax.axhline((y+.7)*30)
def makeFigure(self, margin=0, default_aspect=1.3, **kw):
kw['margin'] = margin
kw['default_aspect'] = default_aspect
(width, height), posn, kw = figureLayout(leftovers=True, **kw)
fig = self._makeFigure(width, height)
ax = fig.add_axes(posn, adjustable="datalim",
frame_on=False, xticks=[], yticks=[])
g = self.populateAxes(ax, **kw)
return fig
if __name__ == '__main__':
Legend().showFigure()
| mit |
CTJChen/ctc_astropylib | ctc_stat.py | 1 | 5941 | import scipy.stats.distributions as dist
import numpy as np
from astropy.coordinates import Distance
from astropy import units as u
from sklearn.neighbors import NearestNeighbors
def bayes_ci(k, n, sigma=None):
'''
    Calculate a binomial proportion confidence interval using the Bayesian
    method described in Cameron et al. 2011.
'''
sig = {'1': 0.68268949, '2': 0.95449974,
'3': 0.99730020, '4': 0.99993666, '5': 0.99999943}
if sigma is None:
c = 0.683
elif sigma in sig:
c = sig[sigma]
else:
return 'sigma = 1~5 only'
err_lower = k/n - dist.beta.ppf((1-c)/2., k+1, n-k+1)
err_upper = dist.beta.ppf(1-(1-c)/2., k+1, n-k+1) - k/n
return np.array([err_lower, err_upper])
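# Editor's sketch (not part of the original module): basic use of bayes_ci.
# Floats are passed for k and n to avoid Python 2 integer division; the values
# are illustrative.
def _bayes_ci_demo():
    err_lower, err_upper = bayes_ci(3.0, 10.0)
    assert 0.0 < err_lower < 1.0 and 0.0 < err_upper < 1.0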
def dmod(redshift, distance=None):
    '''
    Distance modulus m - M = 5*log10(d / 10 pc), computed from an astropy
    Distance object if given, otherwise from the redshift.
    '''
    if distance is not None:
        dist = distance.to(u.parsec).value / 10.
    else:
        dist = Distance(z=redshift).parsec / 10.
    dm = 5 * np.log10(dist)
    return dm
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
    approaches, such as moving-average techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def get_distnn(ra, dec, algorithm='auto'):
'''
Use sklearn.neighbors.NearestNeighbors
to compute the distance to nearest neighbors for a set of RA and dec
ra, dec should be in degrees (floats or doubles)
the outputs are:
distnn and idnn
distnn is in arcsec by default.
The default algorithm is auto,
    but scikit-learn allows the following options:
['auto', 'ball_tree', 'kd_tree', 'brute']
'''
X = np.vstack((ra,dec)).transpose()
nbrs = NearestNeighbors(n_neighbors=2, algorithm=algorithm).fit(X)
distances, indices = nbrs.kneighbors(X)
distnn = distances[:,1]*3600.
idnn = indices[:,1]
return distnn,idnn
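# Editor's sketch (not part of the original module): nearest-neighbour lookup on
# three illustrative sky positions; the coordinates are made-up assumptions.
def _get_distnn_demo():
    ra = np.array([10.0, 10.001, 20.0])
    dec = np.array([0.0, 0.0, 0.0])
    distnn, idnn = get_distnn(ra, dec)
    # the two nearby sources are each other's nearest neighbours, ~3.6 arcsec apart
    assert idnn[0] == 1 and idnn[1] == 0
    assert abs(distnn[0] - 3.6) < 1e-3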
def sample_empirical_dist(xarr, size=None, finite=True):
# https://stackoverflow.com/questions/2745329/
# how-to-make-scipy-interpolate-give-an-extrapolated-result-beyond-the-input-range
from scipy.interpolate import interp1d
from statsmodels.distributions.empirical_distribution import ECDF
    ecdf = ECDF(xarr)
def extrap1d(interpolator):
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
            return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
inv_cdf = extrap1d(interp1d(ecdf.y,ecdf.x,
bounds_error=False, assume_sorted=True))
if size is None:
# if size is not set, the output array has the same length as input x-array
size = len(xarr)
r = np.random.uniform(0, 1, size)
ys = inv_cdf(r)
if finite:
        while sum(~np.isfinite(ys)) > 0:
ys[~np.isfinite(ys)] = inv_cdf(np.random.uniform(0, 1, sum(~np.isfinite(ys))))
return ys
| apache-2.0 |
hugobowne/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 38 | 6118 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
bmazin/ARCONS-pipeline | control/DisplayStack.py | 1 | 23657 | #!/bin/python
'''
Author: Paul Szypryt Date: July 11, 2013
'''
import numpy as np
from util.ObsFile import ObsFile
from util.FileName import FileName
from util.readDict import readDict
from util import utils
import tables
import matplotlib.pyplot as plt
import hotpix.hotPixels as hp
import os
from time import time
import sys
import LoadImageStack
from tables import *
from PyQt4.QtGui import *
from DisplayStack_gui import Ui_DisplayStack_gui
class DisplayStack(QMainWindow):
def __init__(self):
# Start up gui
QWidget.__init__(self, parent=None)
self.ui = Ui_DisplayStack_gui()
self.ui.setupUi(self)
# Initialize Variables
self.initializeVariables()
# Lists and buttons used for specifying run and target information.
# Click item in runList to select a run. Run number corresponds to array index. Load up corresponding target list.
self.ui.runList.itemClicked.connect(self.selectRun)
# Click item in targetList to select a target.
self.ui.targetList.itemClicked.connect(self.selectTarget)
        # Click targetButton to load the selected target's parameter file.
self.ui.targetButton.clicked.connect(self.loadTarget)
self.ui.sunsetList.itemClicked.connect(self.createObsList)
# Use wavelength calibration checkbox
self.ui.wavelengthCalibrationBox.clicked.connect(self.useWaveCal)
# Use flat calibration checkbox
self.ui.flatCalibrationBox.clicked.connect(self.useFlatCal)
# Buttons for obs list creation
self.ui.addButton.clicked.connect(self.addObservation)
self.ui.removeButton.clicked.connect(self.removeObservation)
self.ui.clearButton.clicked.connect(self.createObsList)
# Start process button
self.ui.stackButton.clicked.connect(self.stackProcess)
# Load image stack button
self.ui.loadStackButton.clicked.connect(self.chooseStack)
# Initialize Variables
def initializeVariables(self):
# Define path names
self.displayStackPath = '/Scratch/DisplayStack/'
self.defaultWavelengthPath = '/Scratch/waveCalSolnFiles/PAL2012/master_cals/'
self.defaultFlatPath = '/Scratch/flatCalSolnFiles/'
# Load and display list of run names from /Scratch/DisplayStack/runList.dict
self.loadRunData()
# Load list of target names from /Scratch/DisplayStack/runName/runName.dict, for all runs
self.loadTargetData()
# Function to load list of run names. Runs on initialization.
def loadRunData(self):
# Load run data from /Scratch/DisplayStack/runList.dict
self.runData = readDict()
self.runData.read_from_file(self.displayStackPath + '/runList.dict')
self.runNames = np.array(self.runData['runs'])
# Populate runList table with run names.
for iRun in range(len(self.runNames)):
self.ui.runList.addItem(self.runNames[iRun])
# Function to load a table of target names.
def loadTargetData(self):
self.targetNames = []
# Cycle through runs and extract target name information from various dictionaries.
for iRun in range(len(self.runNames)):
self.targetData = readDict()
self.targetData.read_from_file(self.displayStackPath + self.runNames[iRun] + '/' + self.runNames[iRun] + '.dict')
self.iTargets = np.array(self.targetData['targets'])
self.targetNames.append(self.iTargets)
# Function to select a run and populate the target list for that particular run.
def selectRun(self):
# Clear list of target information for previously selected run.
self.ui.targetList.clear()
# Define a run number by the index of the selected run.
self.runNumber = self.ui.runList.row(self.ui.runList.currentItem())
# Populate targetList table with target names for selected run.
for iTarget in range(len(self.targetNames[self.runNumber])):
self.ui.targetList.addItem(self.targetNames[self.runNumber][iTarget])
def selectTarget(self):
self.targetNumber = self.ui.targetList.row(self.ui.targetList.currentItem())
def loadTarget(self):
self.run = self.runNames[self.runNumber]
self.target = self.targetNames[self.runNumber][self.targetNumber]
try:
self.paramName = self.displayStackPath + self.run + '/' + self.target + '/' + self.target + '.dict'
self.paramData = readDict()
self.paramData.read_from_file(self.paramName)
self.obsTimes = np.array(self.paramData['obsTimes'])
self.utcDates = self.paramData['utcDates']
self.sunsetDates = self.paramData['sunsetDates']
self.calTimestamps = self.paramData['calTimestamps']
self.flatCalDates = self.paramData['flatCalDates']
self.RA = self.paramData['RA']
self.Dec = self.paramData['Dec']
self.hourAngleOffset = self.paramData['HA_offset']
print 'Loading parameter file at ' + self.displayStackPath + self.run + '/' + self.target + '/' + self.target + '.dict'
self.createSunsetList()
#self.createObsList()
self.createWavelengthList()
self.createFlatList()
self.paramFileExists = True
except IOError:
print 'No existing parameter file at ' + self.displayStackPath + self.run + '/' + self.target + '/' + self.target + '.dict'
self.ui.sunsetList.clear()
self.ui.obsList.clear()
self.ui.inputList.clear()
self.ui.wavelengthList.clear()
self.ui.flatList.clear()
self.paramFileExists = False
# Choose Obs File
# Create list of available sunset dates
def createSunsetList(self):
self.ui.sunsetList.clear()
for iDate in range(len(self.sunsetDates)):
self.ui.sunsetList.addItem(self.sunsetDates[iDate])
# Create Initial Obs file list
def createObsList(self):
self.ui.obsList.clear()
self.ui.inputList.clear()
self.currentSunsetDate = self.sunsetDates[self.ui.sunsetList.row(self.ui.sunsetList.currentItem())]
self.currentUTCDate = self.utcDates[self.ui.sunsetList.row(self.ui.sunsetList.currentItem())]
self.singleDayObservations = self.obsTimes[self.ui.sunsetList.row(self.ui.sunsetList.currentItem())]
for iObs in range(len(self.singleDayObservations)):
self.ui.obsList.addItem(self.singleDayObservations[iObs])
# Add observation to input list
def addObservation(self):
self.selectedObs = self.ui.obsList.currentItem()
self.ui.obsList.takeItem(self.ui.obsList.row(self.selectedObs))
self.ui.inputList.addItem(self.selectedObs)
self.ui.inputList.sortItems()
# Remove observation from input list
def removeObservation(self):
self.removedObs = self.ui.inputList.currentItem()
self.ui.inputList.takeItem(self.ui.inputList.row(self.removedObs))
self.ui.obsList.addItem(self.removedObs)
self.ui.obsList.sortItems()
# Load settings
def loadSettings(self):
self.validSettings = True
# Run and target information
self.run = self.runNames[self.runNumber]
self.target = self.targetNames[self.runNumber][self.targetNumber]
# General settings
self.integrationTime = int(self.ui.integrationTimeLine.text())
self.useTimeAdjustment = self.ui.timeAdjustmentBox.isChecked()
self.useHotPixelMasking = self.ui.hotPixelBox.isChecked()
# Wavelength calibration settings
self.useWavelengthCalibration = self.ui.wavelengthCalibrationBox.isChecked()
self.useBestWavelengthCalibration = self.ui.bestWavelengthCalibrationBox.isChecked()
self.lowerWavelengthCutoff = float(self.ui.lowerWavelengthCutoffLine.text())
self.upperWavelengthCutoff = float(self.ui.upperWavelengthCutoffLine.text())
# Flat calibration settings
self.useFlatCalibration = self.ui.flatCalibrationBox.isChecked()
self.useDeadPixelMasking = self.ui.deadPixelBox.isChecked()
self.fileCount = self.ui.inputList.count()
self.weighted = self.useFlatCalibration
self.useRawCounts = not (self.useWavelengthCalibration and self.useFlatCalibration)
self.scaleByEffInt = self.useHotPixelMasking
if self.ui.sunsetList.currentItem() != None:
if self.fileCount == 0:
print 'Please select files to process...'
self.validSettings = False
if self.useWavelengthCalibration:
if self.useBestWavelengthCalibration:
print 'Using best wavelength calibration...'
elif self.ui.wavelengthList.currentItem() == None:
print 'Please select wavelength calibration...'
self.validSettings = False
else:
self.selectedWvlCal = self.ui.wavelengthList.currentItem().text()
print "Chose wvl cal: ", str(self.defaultWavelengthPath+self.selectedWvlCal)
self.wvlCalFilename = str(self.defaultWavelengthPath+self.selectedWvlCal)
#self.wvlCalFilename = str(FileName(run=self.run,date=self.currentSunsetDate,tstamp=self.selectedWvlCal).calSoln())
if self.useFlatCalibration:
if self.ui.flatList.currentItem() == None:
print 'Please select flat calibration...'
self.validSettings = False
else:
self.flatCalNight = self.ui.flatList.currentItem().text()
self.flatCalFilename = str(FileName(run=self.run,date=self.flatCalNight).flatSoln())
else:
print 'Please select sunset night...'
self.validSettings = False
# Load hot pixel mask
def loadHotMask(self):
#self.hotPixelFilename = str(self.displayStackPath + self.run + '/' + self.target + '/HotPixelMasks/hotPixelMask_' + self.obsTS + '.h5')
self.hotPixelFilename = str(FileName(obsFile = self.ob).timeMask())
if not os.path.exists(self.hotPixelFilename):
hp.findHotPixels(obsFile=self.ob,outputFileName=self.hotPixelFilename)
print "Hot pixel mask saved to %s"%(self.hotPixelFilename)
self.ob.loadHotPixCalFile(self.hotPixelFilename,switchOnMask=True)
# Create wavelength cal file list
def createWavelengthList(self):
self.ui.wavelengthList.clear()
for iCal in range(len(self.calTimestamps)):
self.ui.wavelengthList.addItem(self.calTimestamps[iCal])
# Enable/disable wavecal options
def useWaveCal(self):
if self.ui.wavelengthCalibrationBox.isChecked():
self.ui.lowerWavelengthCutoffLine.setEnabled(True)
self.ui.lowerWavelengthCutoffLabel.setEnabled(True)
self.ui.upperWavelengthCutoffLine.setEnabled(True)
self.ui.upperWavelengthCutoffLabel.setEnabled(True)
self.ui.wavelengthList.setEnabled(True)
self.ui.flatCalibrationBox.setEnabled(True)
self.ui.flatCalibrationBox.setChecked(True)
self.ui.deadPixelBox.setEnabled(True)
self.ui.deadPixelBox.setChecked(True)
self.ui.flatList.setEnabled(True)
self.ui.bestWavelengthCalibrationBox.setEnabled(True)
self.ui.bestWavelengthCalibrationBox.setChecked(True)
else:
self.ui.lowerWavelengthCutoffLine.setEnabled(False)
self.ui.lowerWavelengthCutoffLabel.setEnabled(False)
self.ui.upperWavelengthCutoffLine.setEnabled(False)
self.ui.upperWavelengthCutoffLabel.setEnabled(False)
self.ui.wavelengthList.setEnabled(False)
self.ui.flatCalibrationBox.setEnabled(False)
self.ui.flatCalibrationBox.setChecked(False)
self.ui.deadPixelBox.setChecked(False)
self.ui.deadPixelBox.setEnabled(False)
self.ui.flatList.setEnabled(False)
self.ui.bestWavelengthCalibrationBox.setEnabled(False)
self.ui.bestWavelengthCalibrationBox.setChecked(False)
# Create flat cal file list
def createFlatList(self):
self.ui.flatList.clear()
for iCal in range(len(self.flatCalDates)):
self.ui.flatList.addItem(self.flatCalDates[iCal])
# Enable/disable flatcal options
def useFlatCal(self):
if self.ui.flatCalibrationBox.isChecked():
self.ui.deadPixelBox.setEnabled(True)
self.ui.deadPixelBox.setChecked(True)
self.ui.flatList.setEnabled(True)
else:
self.ui.deadPixelBox.setChecked(False)
self.ui.deadPixelBox.setEnabled(False)
self.ui.flatList.setEnabled(False)
# Load dead pixel mask
def loadDeadMask(self):
self.deadPixelFilename = str(self.displayStackPath + self.run + '/' + self.target + '/DeadPixelMasks/deadPixelMask_' + self.obsTS + '.npz')
if not os.path.exists(self.deadPixelFilename):
self.deadMask = self.ob.getDeadPixels()
np.savez(self.deadPixelFilename,deadMask = self.deadMask)
print "Dead pixel mask saved to %s"%(self.deadPixelFilename)
else:
self.deadFile = np.load(self.deadPixelFilename)
self.deadMask = self.deadFile['deadMask']
# Describe the structure of the header row
class headerDescription(tables.IsDescription):
targetName = tables.StringCol(100, dflt='')
run = tables.StringCol(100, dflt='')
obsFileName = tables.StringCol(100, dflt='')
wvlCalFileName = tables.StringCol(100, dflt='')
flatCalFileName = tables.StringCol(100, dflt='')
deadPixFileName = tables.StringCol(100, dflt='')
hotPixFileName = tables.StringCol(100, dflt='')
nCol = tables.UInt32Col(dflt=-1)
nRow = tables.UInt32Col(dflt=-1)
lowWvlCutoff = tables.Float64Col(dflt=np.nan)
highWvlCutoff = tables.Float64Col(dflt=np.nan)
exptime = tables.Float64Col(dflt=np.nan)
lst = tables.StringCol(100, dflt='')
integrationTime = tables.Float64Col(dflt=np.nan)
RA = tables.StringCol(100, dflt='')
Dec = tables.StringCol(100, dflt='')
HA_offset = tables.Float64Col(dflt=0.0)
# Create output name
def createOutputName(self):
self.rawName = str(self.displayStackPath + self.run + '/' + self.target + '/ImageStacks/' + 'ImageStack_' + self.obsTS + '_' + str(self.integrationTime) + 's')
if self.useWavelengthCalibration and self.useHotPixelMasking:
self.outputFilename = str(self.rawName + '_' + str(int(self.lowerWavelengthCutoff)) + '-' + str(int(self.upperWavelengthCutoff)) + '_hp.h5')
elif self.useWavelengthCalibration and not self.useHotPixelMasking:
self.outputFilename = str(self.rawName + '_' + str(int(self.lowerWavelengthCutoff)) + '-' + str(int(self.upperWavelengthCutoff)) + '.h5')
elif not self.useWavelengthCalibration and self.useHotPixelMasking:
self.outputFilename = str(self.rawName + '_hp.h5')
else:
self.outputFilename = str(self.rawName + '.h5')
def createH5File(self):
# Create header and data group and table names
headerGroupName = 'header'
headerTableName = 'header'
dataGroupName = 'stack'
dataTableName = 'stack'
timeTableName = 'time'
# Create lookup names for header information
runColName = 'run'
targetColName = 'targetName'
obsFileColName = 'obsFileName'
wvlCalFileColName = 'wvlCalFileName'
flatCalFileColName = 'flatCalFileName'
nRowColName = 'nRow'
nColColName = 'nCol'
RAColName = 'RA'
DecColName = 'Dec'
deadPixColName = 'deadPixFileName'
hotPixColName = 'hotPixFileName'
lowWvlColName = 'lowWvlCutoff'
highWvlColName = 'highWvlCutoff'
expTimeColName = 'exptime'
lstColName = 'lst'
integrationTimeColName = 'integrationTime'
HA_offsetColName = 'HA_offset'
# Create an h5 output file and create the header and data groups
fileh = tables.openFile(self.outputFilename, mode='w')
headerGroup = fileh.createGroup("/", headerGroupName, 'Header')
stackGroup = fileh.createGroup("/", dataGroupName, 'Image Stack')
# Create row for header information
headerTable = fileh.createTable(headerGroup, headerTableName, self.headerDescription,
'Header Info')
header = headerTable.row
# Fill in the header with possibly useful information.
header[runColName] = self.run
header[targetColName] = self.target
header[obsFileColName] = self.obsFn
header[nColColName] = self.numberCols
header[nRowColName] = self.numberRows
header[RAColName] = self.RA
header[DecColName] = self.Dec
header[expTimeColName] = self.exptime
header[lstColName] = self.lst
header[integrationTimeColName] = self.integrationTime
header[HA_offsetColName] = self.hourAngleOffset
if self.useDeadPixelMasking:
header[deadPixColName] = self.deadPixelFilename
if self.useHotPixelMasking:
header[hotPixColName] = self.hotPixelFilename
if self.useWavelengthCalibration:
header[wvlCalFileColName] = self.ob.wvlCalFileName
header[lowWvlColName] = self.lowerWavelengthCutoff
header[highWvlColName] = self.upperWavelengthCutoff
if self.useFlatCalibration:
header[flatCalFileColName] = self.flatCalFilename
header.append()
# Create an h5 array for the midtime of each frame in the image cube.
timeTable = fileh.createCArray(stackGroup, timeTableName, Float64Atom(), (1,len(self.times)))
timeTable[:] = self.times
# Create an h5 array for the image cube.
stackTable = fileh.createCArray(stackGroup, dataTableName, Float64Atom(), (self.numberRows,self.numberCols, self.cube.shape[2]))
stackTable[:] = self.cube
# Flush the h5 output file
fileh.flush()
fileh.close()
# Start process for creating image stacks
def stackProcess(self):
# Check for valid params file
if self.paramFileExists:
# Load settings chosen from gui
self.loadSettings()
if self.validSettings:
# Loop through all files in input list
for iFile in range(self.fileCount):
# Create ObsFile instance
self.obsTS = str(self.currentUTCDate) + '-' + self.ui.inputList.item(iFile).text()
self.obsFn = str(FileName(run=self.run,date=self.currentSunsetDate,tstamp=self.obsTS).obs())
print 'Processing file ' + self.obsFn + '...'
self.ob = ObsFile(self.obsFn)
self.numberRows = self.ob.nRow
self.numberCols = self.ob.nCol
# Load time adjustment file
if self.useTimeAdjustment:
print 'Loading time adjustment file...'
self.ob.loadTimeAdjustmentFile(FileName(run=self.run).timeAdjustments())
# Load hot pixel mask
if self.useHotPixelMasking:
print 'Loading hot pixel mask...'
self.loadHotMask()
# Load wave cal solution
if self.useWavelengthCalibration:
if self.useBestWavelengthCalibration:
print 'Loading best wavelength calibration...'
self.ob.loadBestWvlCalFile()
else:
print 'Loading selected wavelength calibration...'
self.ob.loadWvlCalFile(self.wvlCalFilename)
# Load flatcal solution
if self.useFlatCalibration:
print 'Loading flat calibration...'
self.ob.loadFlatCalFile(self.flatCalFilename)
# Load dead pixel mask
if self.useDeadPixelMasking:
print 'Loading dead pixel mask...'
self.loadDeadMask()
# Set wavelength cutoffs
if self.useWavelengthCalibration:
print 'Setting wavelength cutoffs...'
self.ob.setWvlCutoffs(self.lowerWavelengthCutoff,self.upperWavelengthCutoff)
# Timing
self.unix = self.ob.getFromHeader('unixtime')
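# The line below converts the Unix timestamp (seconds since 1970-01-01 00:00 UTC)
# to a Julian Date; 2440587.5 is the Julian Date of the Unix epoch, so
# JD = unixtime/86400 + 2440587.5.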
self.startJD = self.unix/86400.+2440587.5
self.exptime = self.ob.getFromHeader('exptime')
self.lst = self.ob.getFromHeader('lst')
self.times = []
self.frames = []
# Create Image Stack
print 'Stacking images...'
for iSec in np.arange(0,self.exptime,self.integrationTime):
# Add the seconds offset to the Julian Date, moving jd to the center of the integration bin
self.jd = self.startJD + iSec/(24.*3600.) + self.integrationTime/2./(24.*3600.)
self.times.append(self.jd)
print 'Creating frame for time ' + str(self.jd)
self.frameData = self.ob.getPixelCountImage(firstSec=iSec,integrationTime=self.integrationTime,weighted=self.weighted,getRawCount=self.useRawCounts,scaleByEffInt=self.scaleByEffInt)
self.frame = self.frameData['image']
if self.ui.verticalFlipBox.isChecked():
self.frame = np.flipud(self.frame)
if self.useDeadPixelMasking:
self.frame[self.deadMask == 0] = np.nan
self.frames.append(self.frame)
self.cube = np.dstack(self.frames)
self.times = np.array(self.times)
# Create output file
self.createOutputName()
print 'Saving image stack to ' + self.outputFilename
self.createH5File()
# Invalid params file
else:
print 'Invalid parameter file...'
# Choose an image stack
def chooseStack(self):
self.defaultLoadStackDirectory = str(self.displayStackPath)
self.stackName = ''
self.stackName = QFileDialog.getOpenFileName(parent=None, directory=self.defaultLoadStackDirectory, caption=str("Choose Image Stack"), filter=str("H5 (*.h5)"))
if self.stackName == '':
print 'No file chosen'
else:
loadStackApp = LoadImageStack.LoadImageStack(stackName = self.stackName)
loadStackApp.show()
loadStackApp.exec_()
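# Illustrative sketch: one possible way to read back an image stack written by
# createH5File() above, using the same PyTables 2.x calls (openFile, natural
# naming) already used in this module. The group and array names ('header',
# 'stack', 'time') mirror those written by createH5File(); the helper name and
# its return convention are assumptions for illustration, not an existing
# interface of this application.
def readImageStackFile(stackFilename):
    """Return (cube, times, header) from an ImageStack .h5 file.

    cube   -- (nRow, nCol, nFrames) array of frame images
    times  -- Julian Date midpoint of each frame in the cube
    header -- the single row of metadata written by createH5File()
    """
    fileh = tables.openFile(stackFilename, mode='r')
    try:
        cube = fileh.root.stack.stack.read()
        times = fileh.root.stack.time.read()[0]
        header = fileh.root.header.header.read()[0]
    finally:
        fileh.close()
    return cube, times, header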
# Start up main gui
if __name__ == "__main__":
app = QApplication(sys.argv)
myapp = DisplayStack()
myapp.show()
app.exec_()
| gpl-2.0 |
ElDeveloper/american-gut-web | amgut/lib/locale_data/american_gut.py | 1 | 136722 | #!/usr/bin/env python
from __future__ import division
from amgut.lib.config_manager import AMGUT_CONFIG
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The American Gut Project Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# Any media specific localizations
HELP_EMAIL = "info@americangut.org"
_SITEBASE = AMGUT_CONFIG.sitebase
media_locale = {
'LOCALE': AMGUT_CONFIG.locale,
'SITEBASE': _SITEBASE,
'LOGO': _SITEBASE + '/static/img/ag_logo.jpg',
'ANALYTICS_ID': 'UA-55353353-1',
'LATITUDE': 39.83,
'LONGITUDE': -99.89,
'ZOOM': 4,
'STEPS_VIDEO': "http://player.vimeo.com/video/63542787",
'ADD_PARTICIPANT': 'http://player.vimeo.com/video/63931218',
'ADD_PARTICIPANT_IMG_1': _SITEBASE + "/static/img/add_participant.png",
'ADD_PARTICIPANT_IMG_MENU': _SITEBASE + "/static/img/add_participant_menu.png",
'LOG_SAMPLE_OPTS': _SITEBASE + "/static/img/log_sample_options.png",
'ADD_SAMPLE_HIGHLIGHT': _SITEBASE + "/static/img/add_sample_highlight.png",
'ADD_SAMPLE_OVERVIEW': _SITEBASE + "/static/img/add_sample_overview.png",
'FAQ_AMBIGUOUS_PASS': _SITEBASE + '/static/img/creds_example.png',
'SAMPLE_BARCODE': _SITEBASE + '/static/img/sample_barcode.jpg',
'SWAB_HANDLING': 'http://player.vimeo.com/video/62393487',
'HELP_EMAIL': HELP_EMAIL,
'PROJECT_TITLE': AMGUT_CONFIG.project_name,
'FAVICON': _SITEBASE + '/static/img/favicon.ico',
'FUNDRAZR_URL': 'https://fundrazr.com/campaigns/4Tqx5',
'NAV_PARTICIPANT_RESOURCES': 'Participant resources',
'NAV_HOME': 'Home',
'NAV_MICROBIOME_101': '%s 101' % AMGUT_CONFIG.project_shorthand,
'NAV_FAQ': 'FAQ',
'NAV_MICROBIOME_FAQ': 'Human Microbiome FAQ',
'NAV_ADDENDUM': 'How do I interpret my results?',
'NAV_PRELIM_RESULTS': 'Preliminary results!',
'NAV_CHANGE_PASSWORD': 'Change Password',
'NAV_CONTACT_US': 'Contact Us',
'NAV_LOGOUT': 'Log out',
'NAV_HUMAN_SAMPLES': 'Human Samples',
'NAV_RECEIVED': '(Received)',
'NAV_ADD_HUMAN': 'Add Human Source',
'NAV_ANIMAL_SAMPLES': 'Animal Samples',
'NAV_ADD_ANIMAL': 'Add Animal Source',
'NAV_ENV_SAMPLES': 'Environmental Samples',
'NAV_LOG_SAMPLE': 'Log Sample',
'NAV_JOIN_PROJECT': 'Join The Project',
'NAV_KIT_INSTRUCTIONS': 'Kit Instructions',
'NAV_PARTICIPANT_LOGIN': 'Participant Log In',
'NAV_REGISTER_KIT': 'Register Kit',
'NAV_FORGOT_KITID': 'I forgot my kit ID',
'NAV_INTERNATIONAL': 'International Shipping',
'NAV_FORGOT_PASSWORD': 'I forgot my password',
'ADDENDUM_CERT_TITLE': _SITEBASE + '/static/img/Michael_Pollan_mod-01.png',
'ADDENDUM_CERT_NAME': _SITEBASE + '/static/img/Michael_Pollan_mod-01b.png',
'ADDENDUM_CERT_HEADER': _SITEBASE + '/static/img/Michael_Pollan_mod-02.png',
'ADDENDUM_CERT_BARCHART': _SITEBASE + '/static/img/Michael_Pollan_mod-11.png',
'ADDENDUM_CERT_BARCHART_LEGEND': _SITEBASE + '/static/img/Michael_Pollan_mod-12.png',
'ADDENDUM_CERT_ABUNDANT_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-13.png',
'ADDENDUM_CERT_ENRICHED_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-14.png',
'ADDENDUM_CERT_RARE_MICROBES': _SITEBASE + '/static/img/Michael_Pollan_mod-15.png',
'ADDENDUM_CERT_HEADER_PCOA': _SITEBASE + '/static/img/Michael_Pollan_mod-03.png',
'ADDENDUM_CERT_PCOA_LEGEND': _SITEBASE + '/static/img/Michael_Pollan_mod-04.png',
'ADDENDUM_CERT_PCOA_BODYSITES': _SITEBASE + '/static/img/Michael_Pollan_mod-08.png',
'ADDENDUM_CERT_PCOA_AGES_POP': _SITEBASE + '/static/img/Michael_Pollan_mod-09.png',
'ADDENDUM_CERT_PCOA_AG_POPULATION': _SITEBASE + '/static/img/Michael_Pollan_mod-10.png',
'ADDENDUM_TAX_BARCHART': _SITEBASE + '/static/img/TaxFig.png',
'ADDENDUM_PCOA_BODYSITES': _SITEBASE + '/static/img/PCoA1.png',
'ADDENDUM_PCOA_AGES_POPS': _SITEBASE + '/static/img/PCoA2.png',
'ADDENDUM_PCOA_AG_POPULATION': _SITEBASE + '/static/img/PCoA3.png',
'PORTAL_DIET_QUESTIONS': _SITEBASE + '/static/img/diet_questions.png',
'PORTAL_SHIPPING': _SITEBASE + '/static/img/shipping.png',
'EMAIL_ERROR': "There was a problem sending your email. Please contact us directly at <a href='mailto:%(help_email)s'>%(help_email)s</a>" % {'help_email': HELP_EMAIL},
'EMAIL_SENT': 'Your message has been sent. We will reply shortly',
'SHIPPING_ADDRESS': "University of California, San Diego<br>Knight Lab/ATTN: Greg Humphrey<br>BRF II Room 1220D<br>9500 Gilman Drive, MC 0763<br>La Jolla, CA 92093-0763",
}
_HANDLERS = {
'PARTICIPANT_EXISTS': 'Participant %s already exists!',
'MISSING_NAME_EMAIL': 'Missing participant name or email. Please retry, adding all required information.',
'SUCCESSFULLY_ADDED': "Successfully added %s!",
'SUCCESSFULLY_EDITED': "Successfully edited %s!",
'AUTH_REGISTER_SUBJECT': "%(project_shorthand)s Verification Code" % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
    'AUTH_REGISTER_PGP': "\n\nFor the PGP cohort, we are requesting that you collect one sample from each of the following sites:\n\nLeft hand\nRight hand\nForehead\nMouth\nFecal\n\nThis is important to ensure that we have the same types of samples for all PGP participants which, in turn, could be helpful in downstream analysis when looking for relationships between the microbiome and the human genome.\n\n",
    'AUTH_REGISTER_BODY': "Thank you for registering with the %(project_name)s! Your verification code is:\n\n{0}\n\nYou will need this code to verify your kit on the %(project_shorthand)s website. To get started, please log into:\n\nhttp://microbio.me/AmericanGut\n\nEnter the kit_id and password found inside your kit, verify the contents of your kit, and enter the verification code found in this email.{1}\n\nSincerely,\nThe %(project_shorthand)s Team" % {'project_shorthand': AMGUT_CONFIG.project_shorthand, 'project_name': AMGUT_CONFIG.project_name},
'KIT_REG_SUCCESS': 'Kit registered successfully.',
'INVALID_KITID': "Invalid Kit ID or Password",
'ADD_KIT_ERROR': "Could not add kit to database. Did you hit the back button while registering and press 'register user' again?",
'ADD_BARCODE_ERROR': "Could not add barcode to database. Did you hit the back button while registering and press 'register user' again?",
'CHANGE_PASS_BODY': 'This is a courtesy email to confirm that you have changed your password for your kit with ID %s. If you did not request this change, please email us immediately at {0}'.format(media_locale['HELP_EMAIL']),
'CHANGE_PASS_SUBJECT': '%(project_shorthand)s Password Reset' % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
    'RESET_PASS_BODY': 'The password on American Gut Kit ID %s has been reset. Please click the link below within two hours:\nhttp://microbio.me/americangut/change_pass_verify/?email=%s&kitid=%s&passcode=%s',
'MINOR_PARENTAL_BODY': "Thank you for your interest in this study. Because of your status as a minor, we will contact you within 24 hours to verify parent/guardian consent.",
'MESSAGE_SENT': "Your message has been sent. We will reply shortly",
    'KIT_IDS_BODY': 'Your {1} Kit IDs are %s. You are receiving this email because you requested your Kit ID from the {1} web page. If you did not request your Kit ID, please email {0}.\n\nThank you,\nThe {1} Team\n'.format(media_locale['HELP_EMAIL'], AMGUT_CONFIG.project_shorthand),
'KIT_IDS_SUBJECT': '%(project_shorthand)s Kit ID' % {'project_shorthand': AMGUT_CONFIG.project_shorthand},
'BARCODE_ERROR': "ERROR: No barcode was requested",
'AUTH_SUBJECT': "You have registered your kit! Your verification code is below.",
'REGISTER_KIT': 'Kit has not been registered. Please click "Register Kit" link.'
}
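# Note on the templates above: the '{0}'/'{1}' placeholders (e.g. in
# CHANGE_PASS_BODY and KIT_IDS_BODY) are filled at import time via
# str.format(), while the remaining old-style '%s' fields are left for the
# request handlers to interpolate when the message is sent, e.g. (the variable
# names below are hypothetical, shown only to illustrate the expected argument
# order):
#
#     _HANDLERS['RESET_PASS_BODY'] % (kit_id, email, kit_id, passcode)
#
# i.e. the kit ID, the participant email, the kit ID again (for the URL), and
# the reset passcode.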
# Template specific dicts
_FAQ = {
'FAQ_HEADER': "%(shorthand)s FAQ" % {"shorthand": AMGUT_CONFIG.project_shorthand},
'LOG_IN_WHAT_NOW_ANS_1': 'You need to follow the add participant workflow. Click on the "Add Source & Survey" tab located at the top of the page.',
'INFORMATION_IDENTIFY_ME': 'Can data describing my gut microbiome be used to identify me or a medical condition I have?',
'LOG_IN_WHAT_NOW_ANS_3': 'You can log a sample by clicking the "Log Sample" link in the menu. If you do not see the "Log Sample" link, then all of your barcodes have been assigned.',
'PARTICIPATE_WITH_DIAGNOSIS': 'Can I participate in the project if I am diagnosed with ...?',
    'LOG_IN_WHAT_NOW_ANS_5': 'When adding a sample, please be sure to select the barcode that matches the barcode on the sampling tube of the sample that you are logging.',
'TAKES_SIX_MONTHS': 'Does it really take up to three months to get my results?',
'HOW_CHANGE_GUT': 'How can I change my gut microbiome?',
'BETTER_OR_WORSE': 'How can I tell if my gut microbiome is better or worse than other people in my category?',
    'ONLY_FECAL_RESULTS_ANS': 'We have only sent out results for fecal samples and are in the process of evaluating how best to present the other sample types. Please see <a href="#faq12">the previous question</a>.',
'DIFFERENT_WHATS_WRONG_WITH_ME_ANS': 'No! Your gut microbiome is as unique as your fingerprint so you should expect to see some differences. Many factors can affect your gut microbiome, and any differences you see are likely to be the result of one of these factors. Maybe your diet is different than most people your age. Maybe you just traveled somewhere exotic. Different does not necessarily mean bad.',
'WHEN_RESULTS_NON_FECAL_ANS': 'The vast majority of the samples we\'ve received are fecal, which was why we prioritized those samples. Much of the analysis and results infrastructure we\'ve put in place is applicable to other sample types, but we do still need to assess what specific representations of the data make the most sense to return to participants. We apologize for the delay.',
'FIND_DETAILED_INFO': 'Where can I find more detailed information about my sample?',
'ADD_PARTICIPANT': '<a href="%(add_participant_vid)s">%(shorthand)s - How to Add a Participant</a> from <a href="http://vimeo.com/user16100300">shelley schlender</a> on <a href="http://vimeo.com">Vimeo</a>.' % {"shorthand": AMGUT_CONFIG.project_shorthand, 'add_participant_vid': media_locale["ADD_PARTICIPANT"]},
'PASSWORD_DOESNT_WORK': "My password doesn't work!",
'COMBINE_RESULTS': 'My whole family participated, can we combine the results somehow?',
'PASSWORD_DOESNT_WORK_ANS': '<p>The passwords have some ambiguous characters in them, so we have this guide to help you decipher which characters are in your password.</p>'
'<p class="ambig">abcdefghijklmnopqrstuvwxyz<br>ABCDEFGHIJKLMNOPQRSTUVWXYZ<br>1234567890<br>1 = the number 1<br>l = the letter l as in Lima<br>0 = the number 0<br>O = the letter O as in Oscar<br>g = the letter g as in golf<br>q = the letter q as in quebec</p>',
'HANDLING_SWABS': '%(shorthand)s - Handling Your SWABS</a> from' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'LOG_IN_WHAT_NOW_ANS_4': 'The generic add sample page looks like this:',
'RAW_DATA_ANS_2': 'Processed sequence data and open-access descriptions of the bioinformatic processing can be found at our <a href="https://github.com/qiime/American-Gut">Github repository</a>.</p>'
'<p>Sequencing of %(shorthand)s samples is an on-going project, as are the bioinformatic analyses. These resources will be updated as more information is added and as more open-access descriptions are finalized.' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'RAW_DATA_ANS_1': '<P>The raw data can be fetched from the <a href=http://www.ebi.ac.uk/>European Bioinformatics Institute</a>. EBI is part of <a href=http://www.insdc.org/>The International Nucleotide Sequence Database Collaboration</a> and is a public warehouse for sequence data. The deposited %(project)s accessions so far are:<ol><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003819&display=html">ERP003819</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003822&display=html">ERP003822</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003820&display=html">ERP003820</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP003821&display=html">ERP003821</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005367&display=html">ERP005367</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005366&display=html">ERP005366</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005361&display=html">ERP005361</a></li><li style="list-style-type:square"><a href="http://www.ebi.ac.uk/ena/data/view/ERP005362&display=html">ERP005362</a></li></ol>' % {"project": AMGUT_CONFIG.project_name},
'BETTER_OR_WORSE_ANS': 'Right now, you can\'t. We\'re still trying to understand what constitutes a normal or average gut microbiome, and we have a lot to learn about the functions of many of the microbes that inhabit the gut. Therefore, it\'s tough to know what combinations of microbes are best for nutrition and health. That\'s one reason collecting data from so many people is important - hopefully we can start to learn more about this.',
'LOOK_BELOW': "If you're still experiencing issues, look for your problem in the FAQ below",
'PASSWORD_SAME_VERIFICATION_ANS': 'No. Your <strong>password</strong> is printed on the sheet that you received with your kit in the mail. That sheet looks like this:</p>'
'<img src="%(FAQ_AMBIGUOUS_PASS)s"/><p>Your <strong>verification code</strong> is emailed to you. Look for the email: <br /><br /><strong>FROM:</strong> %(project)s (%(help_email)s)<br /><strong>SUBJECT:</strong> %(shorthand)s Kit ID & Verification Code' % {"shorthand": AMGUT_CONFIG.project_shorthand, "project": AMGUT_CONFIG.project_name, "FAQ_AMBIGUOUS_PASS": media_locale['FAQ_AMBIGUOUS_PASS'], 'help_email': media_locale['HELP_EMAIL']},
'TAKES_SIX_MONTHS_ANS': 'Yes. It takes about eight weeks for extractions, eight weeks for the remainder of the processing, and two weeks to do the actual sequencing. This is before any analysis and if everything goes as planned, with no delays - equipment down, run failures, reagents or other consumables back ordered. Things do sometimes go wrong, so we say up to three months.',
'PARTICIPATE_WITH_DIAGNOSIS_ANS': 'Of course! The only exclusion criteria are: you must be more than 3 months old and cannot be in prison. Please keep in mind that, for legal and ethical reasons, the %(project)s does not provide medically actionable results or advice.' % {"project": AMGUT_CONFIG.project_name},
'HOW_PROCESS_SAMPLES': 'How are the samples and data processed?',
'WHO_MICHAEL_POLLAN_ANS': 'Michael Pollan is a New York Times Best Seller for his books on diet and nutrition. Further information about Michael can be found <a href="http://michaelpollan.com/">here</a>.',
'WHO_MICHAEL_POLLAN': 'Who is Michael Pollan?',
'HOW_CHANGE_GUT_ANS': 'Although we still don\'t have a predictable way to change the gut microbiome in terms of increasing or decreasing the abundances of specific bacteria, we do know that a variety of factors influence gut microbial community composition. Diet is a major factor affecting the gut microbiome so by changing your diet, you may be able to affect your gut microbiome. We still don\'t fully understand probiotics but know that they can influence your gut microbiome while you are actively taking them. Factors such as stress can also influence the gut microbiome. However, it is important to remember that there are factors we can\'t change, such as age or genetics, that can affect the gut microbiome.',
'RAW_DATA': 'How can I get the raw data?',
'WATCH_VIDEOS': "Watch these helpful videos about what to do once you've received your kit!",
'INTRODUCTION_BEGINNING': '<a href="http://www.robrdunn.com">Rob Dunn</a> has provided this excellent introduction to some of the basics that every curious reader should check out!<br/> <br/>Rob is the author of the <a href="http://www.yourwildlife.org/the-wild-life-of-our-bodies/">Wild Life of Our Bodies</a>. He is an evolutionary biologist and writer at North Carolina State University. For more about your gut and all of your other parts, read more from Rob at <a href="http://www.robrdunn.com">www.robrdunn.com</a></p>'
'',
'INFORMATION_IDENTIFY_ME_ANS': 'No. First, all of your personal information has been de-identified in our database as mandated by institutional guidelines. Second, although each person has a unique gut microbiome, many of the unique qualities are at the species or strain level of bacteria. Our sequencing methods currently do not allow us to describe your gut microbiome in that much detail. Finally, for most medical conditions, there are no known, predictable patterns in gut microbial community composition. Research simply hasn\'t gotten that far yet.</p>'
'<p>We should also mention that since we are only interested in your microbes, we do not sequence human genomic DNA in our typical analyses. Where it is possible for human DNA to be sequenced (e.g., the Beyond Bacteria kits), we remove the human DNA using the same bioinformatics approaches undertaken in the NIH-funded Human Microbiome Project and approved by NIH bioethicists. Additionally, there is so little human DNA in fecal, skin and mucus samples that the chances of us being able to sequence your entire human genome are almost none, even if we tried.',
'FECAL_NO_RESULTS_ANS': 'On any given sequencing run (not just the %(shorthand)s), a small percentage of the samples fail for unknown reasons -- our methods are good but not perfect. This is one of the reasons the sample kits have two Q-tips. It allows us to perform a second microbial DNA extraction and re-sequence if the first attempt failed. We will be doing this for all of the samples that failed. If there was a technical problem with the sample itself (e.g. not enough microbes on the swab) that inhibits us from producing data for you, we will be re-contacting you about collecting another sample.' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'MULTIPLE_KITS_DIFFERENT_TIMES_ANS': 'For best results, we recommend that you mail each sample within 24 hours of collection.',
'STEPS_TO_FOLLOW': '<a href="%(video)s">%(shorthand)s - Steps to Follow When Your Kit Arrives</a> from <a href="http://vimeo.com/user16100300">shelley schlender</a> on <a href="http://vimeo.com">Vimeo</a>.' % {"shorthand": AMGUT_CONFIG.project_shorthand, "video": media_locale["STEPS_VIDEO"]},
'WHY_TWO_SWABS': 'Why are there 2 swabs inside the tube?',
'MULTIPLE_KITS_DIFFERENT_TIMES': 'I have a 2+ sample kit, and would like to collect and send them in at different times',
'COMBINE_RESULTS_ANS': "We're still evaluating how best to present the data for samples that represent a family. We are mailing individual results now and will provide updated results through the web site later.",
'PASSWORD_SAME_VERIFICATION': 'Is my password the same as my verification code?',
'FECAL_NO_RESULTS': 'I sent in a fecal sample but did not get any results, what happened to them?',
'DIFFERENT_WHATS_WRONG_WITH_ME': "I'm different than other people in my category. Does that mean something is wrong with me?",
'WHY_TWO_SWABS_ANS_2': "<P>Each tube is used for <strong>one sample</strong>. The tube has two swabs in it because one is a backup in case the DNA does not amplify on the first swab.</p>"
"<p>Here's a video of Rob Knight talking about swab handling:</p>"
"<iframe src='%(swab_handling)s' width=''500'' height=''281'' frameborder=''0'' webkitallowfullscreen='' mozallowfullscreen='' allowfullscreen=''></iframe>" % {'swab_handling': media_locale['SWAB_HANDLING']},
'MISSING_METADATA_ANS': 'Metadata are information describing your age, gender, diet, etc. Missing metadata mean that this person did not provide us with this information.',
'WHERE_SEND_SAMPLE': 'Where do I send my sample?',
'LOG_IN_WHAT_NOW': "I'm logged in, what do I do now?",
'LOG_IN_WHAT_NOW_ANS_2': '<p>During this workflow you (or whomever is being sampled) will:</p>'
'<ol> <li>Add a participant</li><li>Provide electronic consent</li><li>Answer survey questions (including the diet questions)</li><li>Upon completion, become eligible to log samples</li> </ol><p>When participants are eligible, you will then see their name under the corresponding menu on the left, in this example we have just added the participant "Test":</p>'
'',
'PROJECT_101': '%(shorthand)s 101' % {"shorthand": AMGUT_CONFIG.project_shorthand},
    'WHAT_FORMS_ANS': 'The step in the sampling instructions that asks you to "place your forms and the sample tube in preaddressed envelope" is left over from a previous version of the instructions. There are no forms for you to include inside the envelope with your sample. If you are shipping internationally, please visit the <a href="%(sitebase)s/international_shipping/">International Shipping Instructions</a></p>' % {'sitebase': media_locale['SITEBASE']},
'WHY_TWO_SWABS_ANS_1': 'Each sampling tube contains two swabs and looks like this:',
'MISSING_METADATA': 'What are missing metadata?',
'ONLY_FECAL_RESULTS': 'I sent more than one kind of sample, but I only received data for my fecal sample. What happened to my other samples?',
'NOT_A_BUSINESS_ANS': 'We have had many enquiries about our "service" or "business". %(shorthand)s is a contribution-supported academic project that is a collaboration between the <a href="http://www.earthmicrobiome.org">Earth Microbiome Project</a> and the <a href="http://humanfoodproject.com/">Human Food Project</a>, primarily run out of the <a href="https://knightlab.colorado.edu/">Knight Lab</a> at the University of Colorado at Boulder, and is not a business or service. In particular, %(shorthand)s is not a diagnostic test (although the information gained through the project may in future contribute to the development of diagnostic tests). All data except for information that needs to be kept confidential for privacy reasons is openly and freely released into public databases, and the project is not intended to make a profit (any surplus funds would be recycled back into furthering human microbiome research).' % {"shorthand": AMGUT_CONFIG.project_shorthand},
'HOW_PROCESS_SAMPLES_ANS_1': 'The majority of the samples in the %(project)s are run through a processing pipeline designed to amplify a small region of a gene that is believed to be common to all Bacteria and Archaea. This gene, the 16S ribosomal RNA gene is like a barcode you find on your groceries, and serves as a marker for different organisms. There are quite a few different ways to assess the types of Bacteria and Archaea in a sample, including a variety of techniques even to look at this single gene. Every method has its biases, and comparing data between different methods is <a href="http://www.ncbi.nlm.nih.gov/pubmed/23861384">non-trivial</a> and can sometimes be nearly impossible. One of the primary goals of the %(shorthand)s is to provide data that can be used and reused by researchers worldwide, we have opted to use the standard protocols adopted by the <a href="http://earthmicrobiome.org">Earth Microbiome Project</a>, (<a href="http://www.ncbi.nlm.nih.gov/pubmed/22402401">Caporaso et al 2012</a>, and more detailed description of the <a href="http://www.earthmicrobiome.org/emp-standard-protocols/16s/">protocol</a>). This ensures that the data generated by the %(shorthand)s can be combined with the other 80,000 samples so far indexed by the EMP (as scientists, we get giddy about things like this).</p>' % {'shorthand': AMGUT_CONFIG.project_shorthand, 'project': AMGUT_CONFIG.project_name},
'HOW_PROCESS_SAMPLES_ANS_2': 'DNA sequencing is a complex challenge that involves an army of robots, ultra pure water that costs $75 per 10ml, and an amazing <a href="http://www.illumina.com/systems/miseq.ilmn">digital camera</a> that actually determines individual sequences one nucleotide at a time. The number of stunningly brilliant minds whose footprints exist in these methods is astounding. However, the challenges don\'t end once you get the DNA sequence - some might say they are just beginning. It turns out that figuring out what actually is in your sample, that is, what organisms these sequences correspond to, requires cutting edge computational approaches, supercomputers and caffeine for the people operating them. The questions being asked of the data are themselves complex, and volume of data being processed is simply phenomenal. To give you some idea, for each sample sequenced we obtain around 6 million nucleotides which we represent as letters (A, T, G or C, see <a href="http://en.wikipedia.org/wiki/Nucleotide">here</a> for more info), whereas Shakespeare\'s Hamlet only contains around 150,000 letters (ignoring spaces).</p>',
'HOW_PROCESS_SAMPLES_ANS_3': 'The primary software package we use for processing 16S sequence data is called Quantitative Insights into Microbial Ecology (<a href="http://www.qiime.org">QIIME</a>; <a href="http://www.ncbi.nlm.nih.gov/pubmed/20383131">Caporaso et al. 2010</a>). Using this package, we are able to start with raw sequence data and process it to so that we end up be able to explore the relationships within and between samples using a variety of statistical methods and metrics. To help in the process, we leverage a standard and comprehensive (to date) reference database called Greengenes (<a href="http://www.ncbi.nlm.nih.gov/pubmed/22134646">McDonald et al. 2011</a>; <a href="http://www.ncbi.nlm.nih.gov/pubmed/16820507">DeSantis et al. 2006</a>) that includes information on a few hundred thousand Bacteria and Archaea (it is likely that there are millions or more species of bacteria). Due to the molecular limitations of our approach, and the lack of a complete reference database (because the total diversity of microbes on Earth is still unknown), our ability to determine whether a specific organism is present has a margin of error on the order of millions of years, which limits our ability to assess specific strains or even species using this inexpensive technique (more expensive techniques, such as some of the higher-level analyses and contributions, can provide this information). But all is not lost! By using the evolutionary history of the organisms as inferred by the small pieces of DNA that we have, we can begin to ask broad questions about the diversity within (see <a href="http://www.ncbi.nlm.nih.gov/pubmed/7972354">Faith 1994</a>) and between samples (see <a href="http://www.ncbi.nlm.nih.gov/pubmed/16332807">Lozupone and Knight 2005</a>), and whether the patterns observed relate to study variables (e.g., BMI, exercise frequency, etc).</p>',
'HOW_PROCESS_SAMPLES_ANS_4': 'The specifics on how the %(shorthand)s sequence data are processed can be found <a href="http://nbviewer.ipython.org/github/biocore/American-Gut/blob/master/ipynb/module2_v1.0.ipynb">here</a>, and are written up in an executable <a href="http://ipython.org/notebook">IPython Notebook</a>, which provides all the relevant processing steps in an open-source format. Be warned, processing the full %(shorthand)s dataset takes over 5,000 CPU hours right now (i.e. if you do it on your laptop it might take 7 months, even if you don\'t run out of memory: this might put the time it takes to get your results in perspective). This is the processing pipeline that we use on your data. As this project is a work in progress, we are versioning the processing pipeline as there will continue to be improvements to the process as the project moves forward.</p>' % {'shorthand': AMGUT_CONFIG.project_shorthand},
'HOW_PROCESS_SAMPLES_ANS_5': 'Additional information about the tools used in the %(project)s and our contributions to the microbiome community can be found in the following publications:',
'HOW_PROCESS_SAMPLES_ANS_6': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21552244">Minimum information about a marker gene sequence (MIMARKS) and minimum information about any (x) sequence (MIxS) specifications.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/24280061">EMPeror: a tool for visualizing high-throughput microbial community data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/16332807">UniFrac: a new phylogenetic method for comparing microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/16893466">UniFrac--an online tool for comparing microbial community diversity in a phylogenetic context.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/17220268">Quantitative and qualitative beta diversity measures lead to different insights into factors that structure microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/19710709">Fast UniFrac: facilitating high-throughput phylogenetic analyses of microbial communities including analysis of pyrosequencing and PhyloChip data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20827291">UniFrac: an effective distance metric for microbial community comparison.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21885731">Linking long-term dietary patterns with gut microbial enterotypes.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23326225">A guide to enterotypes across the human body: meta-analysis of microbial community structures in human microbiome datasets.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699609">Structure, function and diversity of the healthy human microbiome.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699610">A framework for human microbiome research.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23587224">The Biological Observation Matrix (BIOM) format or: how I learned to stop worrying and love the ome-ome.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22134646">An improved Greengenes taxonomy with explicit ranks for ecological and evolutionary analyses of bacteria and archaea.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21304728">The Earth Microbiome Project: Meeting report of the "1 EMP meeting on sample selection and acquisition" at Argonne National Laboratory October 6 2010.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21304727">Meeting report: the terabase metagenomics workshop and the vision of an Earth microbiome project.</a></li> </ul>',
'HOW_PROCESS_SAMPLES_ANS_7': 'More detail on our work on the effects of storage conditions can be found in these publications:',
'HOW_PROCESS_SAMPLES_ANS_8': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20412303">Effect of storage conditions on the assessment of bacterial community structure in soil and human-associated samples.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20673359">Sampling and pyrosequencing methods for characterizing bacterial communities in the human gut using 16S sequence tags.</a></li> </ul>',
'HOW_PROCESS_SAMPLES_ANS_9': 'And more detail on our work on sequencing and data analysis protocols can be found in these publications:',
'HOW_PROCESS_SAMPLES_ANS_10': '<ul> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/17881377">Short pyrosequencing reads suffice for accurate microbial community analysis.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/18723574">Accurate taxonomy assignments from 16S rRNA sequences produced by highly parallel pyrosequencers.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/18264105">Error-correcting barcoded primers for pyrosequencing hundreds of samples in multiplex.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22237546">Selection of primers for optimal taxonomic classification of environmental 16S rRNA gene sequences.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22170427">Comparison of Illumina paired-end and single-direction sequencing for microbial 16S rRNA gene amplicon surveys.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21716311">Impact of training sets on classification of high-throughput bacterial 16s rRNA gene surveys.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/21349862">PrimerProspector: de novo design and taxonomic analysis of barcoded polymerase chain reaction primers.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20383131">QIIME allows analysis of high-throughput community sequencing data.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22161565">Using QIIME to analyze 16S rRNA gene sequences from microbial communities.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23861384">Meta-analyses of studies of the human microbiota.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/24060131">Advancing our understanding of the human microbiome using QIIME.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/20534432">Global patterns of 16S rRNA diversity at a depth of millions of sequences per sample.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22402401">Ultra-high-throughput microbial community analysis on the Illumina HiSeq and MiSeq platforms.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/23202435">Quality-filtering vastly improves diversity estimates from Illumina amplicon sequencing.</a></li> <li><a href="http://www.ncbi.nlm.nih.gov/pubmed/22699611">Human gut microbiome viewed across age and geography.</a></li> </ul>' % {"shorthand": AMGUT_CONFIG.project_shorthand, "project": AMGUT_CONFIG.project_name},
'ANOTHER_COPY_RESULTS_ANS': 'You can download a copy from our website. Log in with your account name and password, go to the left side bar, move your mouse to Human Samples -> PARTICIPANT NAME -> SAMPLE NUMBER, and then click on SAMPLE NUMBER.pdf to download it.' % {"shorthand": AMGUT_CONFIG.project_shorthand, "project": AMGUT_CONFIG.project_name},
'FIND_DETAILED_INFO_ANS': 'You can find the raw data from European Bioinformatics Institute (please see <a href="#faq8">here</a>) or download the copy of your result from our website (please see <a href="#faq20">here</a>).',
'WHEN_RESULTS_NON_FECAL': 'I sent in a non-fecal sample and have not received any results, when should I expect results?',
'WHAT_FORMS': 'What are the forms you talk about on the sampling instructions?',
'INTRODUCTION_WHAT_IS_GUT_HEAD': "What is a Gut?",
'INTRODUCTION_WHAT_IS_GUT': "Your gut is a hole that runs through your body. Your gut is actually, developmentally speaking, the outside of your body, but it has evolved many intricacies that make it seem like the inside. Your gut starts with your mouth and ends with your anus. In between food is changed into energy, feces, bacteria, viruses and a few other things. Your gut exacts a kind of metamorphosis on everything you eat, turning hotdog or grilled cheese, miraculously, into energy and, ultimately, cells, signals and even thoughts. We are only beginning to understand this process, a process in which microbes play (or fail to play) a major role.",
'INTRODUCTION_WHAT_IS_PROJECT_HEAD': "What is the %(project_name)s?" % {'project_name': AMGUT_CONFIG.project_name},
'INTRODUCTION_WHAT_IS_PROJECT': "<p>The %(project_name)s is a project in which scientists aim to work with non-scientists both to help them (AKA, you) understand the life inside their own guts and to do science. Science is coolest when it is informs our daily lives and what could possibly be more daily than what goes on in your gut? One of the big questions the %(project_shorthand)s scientists hope to figure out is what characterizes healthy and sick guts (or even just healthier and sicker guts) and how one might move from the latter to the former. Such is the sort of big lofty goal these scientists dream about at night (spirochetes rather than sugarplums dancing through their heads), but even the more ordinary goals are exciting. Even just beginning to know how many and which species live in our guts will be exciting, particularly since most of these species have never been studied, which is to say there are almost certainly new species inside you, though until you sample yourself (and all the steps that it takes to look at a sample happen— the robots, the swirling, the head scratching, the organizing of massive datasets), we won't know which ones. Not many people get to go to the rainforest to search for, much less discover, a new kind of monkey, but a new kind of bacteria, well, it is within (your toilet paper's) reach." % {'project_shorthand': AMGUT_CONFIG.project_shorthand, 'project_name': AMGUT_CONFIG.project_name},
'INTRODUCTION_WHAT_IS_16S_HEAD': "What is 16S rRNA?",
'INTRODUCTION_WHAT_IS_16S': "16S rRNA is a sort of telescope through which we see species that would otherwise be invisible. Let me explain. Historically, microbiologists studied bacteria and other microscopic species by figuring out what they ate and growing them, on petri dishes, in labs, in huge piles and stacks. On the basis of this approach— which required great skill and patience— thousands, perhaps hundreds of thousands, of studies were done. But then… in the 1960s, biologists including the wonderful radical <a href=\"http://www.robrdunn.com/2012/12/chapter-8-grafting-the-tree-of-life/\">Carl Woese</a>, began to wonder if the RNA and DNA of microbes could be used to study features of their biology. The work of Woese and others led to the study of the evolutionary biology of microbes but it also eventually led to the realization that most of the microbes around us were not culturable— we didn't know what they ate or what conditions they needed. This situation persists. No one knows how to grow the vast majority of kinds of organisms living on your body and so the only way to even know they are there is to look at their RNA. There are many bits of RNA and DNA that one might look at, but a bit called 16S has proven particularly useful.",
'INTRODUCTION_ROBOTS_HEAD': "Do you really have a robot?",
'INTRODUCTION_ROBOTS': "Look, here is the deal. Robots. Microbiologists use robots. Personally, I think the fact that microbiologists study the dominant fraction of life on Earth, a fraction that carries out most of the important process (and a fair bit of inexplicable magic) makes microbiologists cool. I am not a microbiologist; I am an evolutionary biologist and a writer, but I think that microbiologists are hipsters cloaked in scientists clothing (and language). But if the outrageousness of their quarry does not convince you they are hip, well, then, let me remind you, they have robots.<br/> <br/>The robots enable the scientists to rapidly extract the DNA and RNA from thousands of samples simultaneously. Specifically, they can load your samples into small plastic plates each with 96 little wells. The robot then loads chemicals into the wells and heats the chemically-laced wells enough to break open the bacterial cells in the sample, BAM! This releases the cell's DNA and RNA. The robots then decode the specific letters (nucleotides) of the 16S gene using the nucleotides dumped out of the broken microbial cells into these plates.",
'INTRODUCTION_TREE_HEAD': "Tree of life",
'INTRODUCTION_TREE': "There is an evolutionary tree inside you. Well, sort of. When scientists study the microbes in your gut (and from the same samples we could also study viruses, bacteriophages— the viruses that attack bacteria—, fungi or even the presence of animals such as worms of various sorts) they do so by looking at the 16s or other genetic code of the RNA on the swabs you send us. We compare the code of each bit of RNA we find to the code of species other people have collected and also the code of the other bits of RNA in your sample. As a result, we can actually use the results of your sample to map the species living in you onto an evolutionary tree. Your own genes occupy one tiny branch on the tree of life, but the species inside of you come from all over the evolutionary tree. In fact, in some people we find species from each of the major branches of the tree of life (archaea, bacteria, eukaryotes) and then also many of the smaller branches. Inside you, in other words, are the consequences of many different and ancient evolutionary stories.",
'INTRODUCTION_MICROBIOME_HEAD': "What is a microbiome?",
'INTRODUCTION_MICROBIOME': "A biome, as ecologists and evolutionary biologists like me historically used it is a self-contained ecosystem, where all the organisms can interact with each other and the environment in which they live, for example a rain forest is a biome, but it is made of smaller biomes, for example a tree is a biome for insects, then a single insect is a biome for bacteria. Therefore, these smaller biomes are often called microbiomes, in the case of you, it's your gut!… A microbiome is a small (micro) version of this larger phenomenon, a whole world within you.",
'INTRODUCTION_EAT_HEAD': "What do my microbes eat?",
'INTRODUCTION_EAT': "Everyplace you have ever set your hand or any other part of your body is covered in microbes. This is true of your gut, but also everything else. Microbes live in clouds. They live in ice. They live deep in the Earth. They also live in your colon, on your skin, and so on. It is reasonable to wonder what they eat. The short answer is everything. Microbes are thousands of times more variable when it comes to their diets than are animals, plants or even fungi. Some microbes can get their nitrogen out of the air; they can, in other words, eat air. Ain't that cool. Others, like us, eat other species, munching them in the world's coolest and most ubiquitous game of packman. The bacteria in your gut are also diverse in terms of their diets. If there are two hundred species of bacteria in your gut (and there probably are at least that many) then there are at least that many different combinations of things that they are eating.",
'INTRODUCTION_MICROBES_COME_FROM_HEAD': "Where do my microbes come from?",
'INTRODUCTION_MICROBES_COME_FROM': "If you had asked this question a few years ago, we would have had to say the stork. But increasingly we are beginning to understand more about where the specific zoo of species in you come from and it is a little grosser than the stork. If you were born vaginally, some of your gut microbes came from your mother's feces (birth, my friend, is messy). Some came from her vagina. Others came, if you were breast fed, from her milk. It is easiest for bacteria, it seems, to colonize our guts in the first moments of life. As we age, our stomachs become acidic. We tend to think of the acid of our stomachs as aiding in digestion; it does that but another key role of our stomachs is to prevent pathogenic species from getting into our guts. The trouble with this, well, there are a couple of problems. One is c-section birth. During c-section birth, microbes need to come from somewhere other than the mother's vagina and feces. The most readily available microbes tend to be those in the hospital. As a result, the microbes in c-section babies tend to, at least initially, resemble those of the hospital more than they resemble those of other babies. With time, many c-section babies eventually get colonized by enough good bacteria (from pet dogs, pet cats, their parents' dirty hands, etc..) to get good microbes, but it is a more chancy process. But then, the big question, one we just don't know the answer to, is which and how many microbes colonize our guts as we get older. How many microbes ride through the acid bath of our stomach on our food and take up residence? We know that bad bacteria, pathogens, do this, but just how often and how good ones do it is not well worked out. You might be thinking, what about yoghurt and I'll tell you the answer, definitely, is we don't really know. Do people who eat yoghurt have guts colonized by species from that yoghurt? Maybe, possibly, I bet they do, but we don't really know (though if we get enough samples from yoghurt and non yoghurt eaters, we could know).",
'INTRODUCTION_DISCOVER_HEAD': "What will we discover in your gut?",
'INTRODUCTION_DISCOVER': "When the early meetings were going on about this project, everyone sat around talking about what we might see from colon samples. One scientist was sure that we would see bacteria that looked like Elvis. Another thought we would find Shakespeare's great lost play. But the truth is that all we are going to see from your gut are lists of nucleotides. Let me explain…<br/> <br/>Nucleotides are the chemical building blocks out of which DNA and RNA are made. They come in different forms, to which scientists have assigned names and letters. When the robots are done with their work, what they produce are lists of the nucleotides in all of the 16S genes from all of the cells in your sample. These nucleotides tell the scientists which kinds of life are in your sample (and in you). But because we will only have samples of little stretches of the 16S genes, we won't know exactly which species are in you, just which lineages they are from. You might have the bacterial equivalent of a chimpanzee and a gorilla in you, but all we'll know from your sample is that there was an ape. Knowing you have a bacterial ape in your gut will, on its own, not tell you so much. The real information will come from context, statistical context. I know, that sounds boring, but I promise it is not.<br/> <br/>We think that hundreds of different things you do during your life, in addition to what your mother and father did (let's try not to think about that), your genes and even just where you grew up influence which species of microbes are found inside you. But we don't really know. The problem is humans are so darn complicated. What we need to be able to do is to compare large numbers of people, people who differ in many ways, to be able to sort out which variables are sometimes a little important and which ones are the big deal. Is a vegan gut very different from a vegetarian one? Does eating yoghurt make a big difference? Do the effects of a c-section birth last forever? These questions require us to compare many people, which is where you come in. Your sample gives us context, and it gives you context too. It won't be terribly exciting on its own (you will know which ancient lineages you have dividing and thriving inside you. OK, that is pretty cool on second thought), but it will be very exciting in context. Where do you fall relative to fish eaters, sick people, healthy people, hunter-gatherers, or even your dog? You will know and we will know. And this is not all.<br/> <br/>All of the questions I have mentioned so far are what I might call first order questions: how does this thing compare to that thing? But what we'd love to be able to answer are second order questions, contingent questions, questions such as whether the effect of your diet depends on your ethnicity (it probably does), whether the effect of having a dog depends on whether or not you live in the city (again, I bet it does) and so on. These questions are exactly the sort of question we have failed to be able to answer well when it comes to diet, because we don't have big enough sample sizes. We can't see the forest for all of the humans. Well, that isn't quite right, but you get the idea: we will be able to understand elaborate effects of multiple variables on the wilderness between your pie hole and the other hole and that, to us, is exciting.",
'INTRODUCTION_STORIES_HEAD': "A few of the stories of the evolutionary tree in your gut",
'INTRODUCTION_STORIES': "Some people have a least favorite bacterium. Salmonella, for example, seems to have inspired some haters. But microbiologists also have favorite bacteria, as well they should. The stories of bacteria (and those who chase and study them) are among the most important of humanity's stories and include the tales of many species without which we could not live, or whose presence or absence affects how we live. These species are as fascinating and, dare I say, lovely as pandas or koala bears, just harder to see and far more significant. I have begun to compile a book of the stories of some of the most common and interesting species you are likely to encounter, whether in your own gut, on your lettuce or the next time you sink your fingers into the soil. These stories will be available online here at <a href=\"http://invisiblelife.yourwildlife.org/\">Invisible Life</a> as they are compiled into a book, a book written by some of the very best science writers AND scientists out there. For starters, you might be interested to know that <a href=\"http://invisiblelife.yourwildlife.org/mycoplasma/\">the smallest species on Earth</a> is sometimes found inside humans and, once we look at your 16S, we will even know whether it lives in you. As more of these stories are written, they will appear here, eventually as an ebook, an ebook that you can reference when you find out what lives inside you to know whether your constant companion is a species we know everything about or, as is more typical, one no one has ever studied. As Charlie Chaplin once said… Wait, Charlie Chaplin was the one who didn't say anything, wasn't he?",
'ANOTHER_COPY_RESULTS': 'Can I get another copy of my results?',
'NOT_A_BUSINESS': 'We are not a business',
'WHERE_SEND_SAMPLE_ANS': '<p>This is the shipping address:</p>'
'%(address)s<p>If you are shipping internationally, please see the <a href="%(sitebase)s/international_shipping/">international shipping instructions</a>.</p>' % {'sitebase': media_locale['SITEBASE'], 'address': media_locale['SHIPPING_ADDRESS']}
}
_TAXA_SUMMARY = {'RESOLUTION_NOTE': "Note: Where there are blanks in the table below, the taxonomy could not be resolved in finer detail.",
'PERCENTAGES_NOTE': "Note: The percentages listed represent the relative abundance of each taxon. This summary is based on normalized data. Because of limitations in the way the samples are processed, we cannot reliably obtain species-level resolution. As such, the data shown are collapsed at the genus level.",
'DOWNLOAD_LINK': "Download the table"}
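
# The notes above describe a genus-level, relative-abundance summary. The helper
# below is only an illustrative sketch of that idea (it is not part of the site's
# processing code, and the semicolon-delimited taxonomy-string format it assumes
# is a hypothetical example): it collapses per-taxon read counts to the genus
# level and converts them to percentages.
def _example_collapse_to_genus(counts):
    """Collapse {taxonomy string: read count} to genus-level percentages.

    Assumes taxonomy strings of the form
    kingdom;phylum;class;order;family;genus, where the genus field may be
    blank when the taxonomy could not be resolved to that level.
    """
    genus_counts = {}
    for taxonomy, count in counts.items():
        levels = [level.strip() for level in taxonomy.split(';')]
        # Keep blank genus fields as-is, mirroring the "blanks in the table" note.
        genus = levels[5] if len(levels) > 5 else ''
        genus_counts[genus] = genus_counts.get(genus, 0) + count
    total = float(sum(genus_counts.values())) or 1.0
    return dict((genus, 100.0 * count / total)
                for genus, count in genus_counts.items())
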
_HELP_REQUEST = {
'CONTACT_HEADER': "Contact the %(shorthand)s" % {"shorthand": AMGUT_CONFIG.project_shorthand},
'RESPONSE_TIMING': "We will send a response to the email address you supply within 24 hours.",
'FIRST_NAME': "First name",
'LAST_NAME': "Last name",
'EMAIL_ADDRESS': "Email address",
'PROBLEM_PROMPT': "Enter information related to your problem"
}
_DB_ERROR = {
'HEADER': 'Oops! There seems to be a database error.',
'MESSAGE': 'Please help us to debug by emailing us at <a href="mailto:%(help_email)s">%(help_email)s</a> and telling us exactly what happened before you got this error.' % {"help_email": media_locale["HELP_EMAIL"]},
'SIGNOFF': 'Thanks, <br /> The American Gut Team'
}
_404 = {
'MAIN_WARNING': '404: Page not found!',
'HELP_TEXT': 'Click <a href="mailto:%(help_email)s">HERE</a> to email us about the issue. Please include the URL you were trying to access:' % {'help_email': media_locale['HELP_EMAIL']}
}
_403 = {
'MAIN_WARNING': '403: Unauthorized access!',
'HELP_TEXT': 'Click <a href="mailto:%(help_email)s">HERE</a> to email us about the issue. Please include the URL you were trying to access:' % {'help_email': media_locale['HELP_EMAIL']}
}
_PARTICIPANT_OVERVIEW = {
'COMPLETED_CONSENT': 'Completed consent',
'COMPLETED_SURVEY': 'Completed survey',
'SAMPLES_ASSIGNED': 'Samples assigned',
'OVERVIEW_FOR_PARTICPANT': 'Overview for participant'
}
_ADD_SAMPLE_OVERVIEW = {
'ADD_SAMPLE_TITLE': 'Choose your sample source ',
'ADD_SAMPLE_TITLE_HELP': 'The sample source is the person, animal or environment that the sample you are currently logging came from. If you took the sample from yourself, you should select yourself as the sample source.',
'ENVIRONMENTAL': 'Environmental',
'ADD_SAMPLE_1': 'If you don\'t see the sample source you want here, you need to add it. You can do this in ',
'ADD_SAMPLE_2': 'Step 2',
'ADD_SAMPLE_3': ' on the main page when you log in.',
'HUMAN_SOURCE': 'Human Source',
'ANIMAL_SOURCE': 'Animal Source'
}
_SAMPLE_OVERVIEW = {
'BARCODE_RECEIVED': 'Sample %(barcode)s. This sample has been received by the sequencing center!',
'DISPLAY_BARCODE': 'Sample %(barcode)s',
'RESULTS_PDF_LINK': 'Click this link to visualize sample %(barcode)s in the context of other microbiomes!',
'SAMPLE_NOT_PROCESSED': 'This sample has not yet been processed. Please check back later.',
'DATA_VIS_TITLE': 'Data Visualization',
'TAXA_SUM_TITLE': 'Taxa Summary',
'RAW_SEQUENCE_TITLE': 'Raw sequences',
'EXCEL_TABLE_TITLE': 'Summarized data',
'BIOM_TABLE_TITLE': 'Summarized data (<a href="http://biom-format.org">biom-format.org</a>)',
'VIEW_TAXA_SUMMARY': 'View Taxa Summary',
'SAMPLE_STATUS': 'Sample Status',
'SAMPLE_SITE': 'Sample Site',
'SAMPLE_DATE': 'Sample Date',
'SAMPLE_TIME': 'Sample Time',
'SAMPLE_NOTES': 'Notes',
'REMOVE_BARCODE': 'Remove barcode %(barcode)s'
}
_NEW_PARTICIPANT_OVERVIEW = {
'ADD_NEW': 'Add a New Human Sample Source',
'EXPLANATION': 'You have entered the add human source workflow. During this workflow you will add a human source that represents whoever is being sampled. You will be asked for consent to join the project and then asked survey questions.',
'ONCE_ADDED': 'Once you have added a human source, you will then see the name of that source in the left menu, and you will also have an option for adding a sample to that source. When you click that, you will be able to select the appropriate barcode and add sample metadata.',
'ELECTRONIC_SIGNATURE': 'In order to participate in this study, you will need to sign a research consent form. This must be done electronically. To consent to using an electronic signature, please click the button below. To obtain a hard copy of the signed agreement, please email the help desk (americangut@gmail.com). You may revoke this consent at any time by going to human samples -> person name -> remove person name. Revoking consent will also halt processing of your sample, if applicable. Once your sample is processed, we cannot remove it from the de-identified information distributed, regardless of consent revocation.',
'ELECTRONIC_SIG_CONSENT': 'I consent to using an electronic signature'
}
_INTERNATIONAL = {
'PAGE_TITLE': '%(shorthand)s International Shipping Instructions' % {'shorthand': AMGUT_CONFIG.project_shorthand},
'INTERNATIONAL_HEADER_1': "International Shipping",
'INTERNATIONAL_TEXT_1': 'Please send any non-UK international samples to:',
'INTERNATIONAL_TEXT_2': 'In order to comply with amended federal and IATA regulations, we are requesting that international participants return their sample tubes through FedEx International and follow the additional safety requirements for shipping human swab samples to the United States. Your air waybill must clearly identify the package as containing "human exempt specimens". The samples will additionally need to be packaged within secondary containment to ensure that they can safely enter the United States.',
'INTERNATIONAL_TEXT_3': "For shipment, you will need to use clear tape to secure the sample swabs to the sample tube, then place the sample tube in the provided buff mailing envelope. Then place the buff envelope inside a Tyvek/plastic mailer, <strong>which can be acquired free of charge from FedEx</strong>, prior to FedEx shipment.",
'INTERNATIONAL_TEXT_4': "If you do not follow these directions the sample will be destroyed by United States Customs at the port of entry into the United States.",
'YOUR_SAMPLES': 'Your samples',
'YOUR_SAMPLES_LIST': '<li>Are considered dried specimens</li><li>Must be shipped via FedEx</li><li>Must have tape sealing the plastic tube that contains the swab</li><li>Must be placed in a buff mailing envelope with the buff envelope placed inside a Tyvek/plastic mailer prior to FedEx shipment</li><li>Must be shipped with an air waybill and must be labeled with the complete address of the sender and complete address of the recipient, and with the words "Human exempt sample(s)"</li>',
'AMERICAN_GUT_ADDRESS': media_locale["SHIPPING_ADDRESS"]
}
_NEW_PARTICIPANT = {
'ADD_HUMAN_TITLE': 'Add a New Human Source',
'SEL_AGE_RANGE': 'Select age range of participant:',
'ADD_HUMAN_HELP_SUGGESTION': 'If you need help with the website, please use the contact mechanism in the menu to the left. Please do not email the people listed in this form for help, unless it has to do with an injury. ',
'AGE_0_6': '6 weeks - 6 years',
'AGE_7_12': '7-12 years',
'AGE_13_17': '13-17 years',
'AGE_18': '18+ years',
'ASSENT_7_12': '''<p align='center'>University of California, San Diego<br/>
Assent to Act as a Research Subject<br/>
(Ages 7-12 years)</p>
<p align='center'><b>Human bugs: why and where they live on you</b></p>
<p>Dr Rob Knight and his research team are doing a research study to find out more about the trillions of tiny living things like bacteria that live in you or on you. You are being asked if you want to be in this study because you are different (in a good way) from everybody else and they are different from each other.</p>
<p>If you decide you want to be in this research study, this is what will happen to you:</p>
<p>We will ask you or your mom or dad to sample some place on your body (like skin or mouth) or your poop (from toilet paper) with something that looks like 2 Q-tips.</p>
<p>Sometimes kids don't feel good while being in this study. You might feel a little bit sore where your skin is rubbed with the Q-tip. Most people don't feel this.</p>
<p>If you feel any of these things, or other things, be sure to tell your mom or dad.</p>
<p>You don't have to be in this research study if you don't want to. Nobody will be mad at you if you say no. Even if you say yes now and change your mind after you start doing this study, you can stop and no one will be mad.</p>
<p>Be sure to ask Dr. Knight or his research team to tell you more about anything you don't understand.</p>
''',
'ASSENT_13_17':
'''<p align='center'>University of California, San Diego<br/>
Assent to Act as a Research Subject<br/>
(Ages 13-17 years)</p>
<p align='center'><b>Explaining variability in the human microbiome</b></p>
<p style='font-weight: bold;'>Who is conducting the study, why you have been asked to participate, how you were selected, and what is the approximate number of participants in the study?</p>
<p>Professor Rob Knight is conducting a research study to find out more about the microbiome (harmless or beneficial microorganisms (tiny living things such as bacteria) that live on and within your body). You have been asked to participate in this study because you, and everyone else on earth, have a unique microbiome, and studying more people of all ages will help us to understand how these micro-organisms may help or harm us. There will be approximately 1000 participants in total.</p>
<p style='font-weight: bold;'>Why is this study being done?</p>
<p>The purpose of this study is to try to understand why different kinds of microorganisms live on and within different people. We are interested in learning whether people with similar age, diet, environment, family, pets, body weight, or other features, also have similar microorganisms. Investigating this question will help us determine how microorganisms contribute to human biology and human health.</p>
<p style='font-weight: bold;'>What will happen to you in this study and which procedures are standard of care and which are experimental?</p>
<p>If you agree to take part in this study, the following will happen to you: You will be asked to sign this assent form and then complete a survey about what you eat, how old you are, whether you are a boy or girl, how tall you are and how much you weigh.</p>
<p>Then you will be asked to sample yourself with the swabs (look like Q-tips) we give you. The most common sample is of your poop (stool) where you apply a small smear to the tips of the swab from used toilet tissue. You may also sample any area of skin, your tongue or mouth, your nostrils, ear wax, vagina, hair or nails. The sampling is usually done by you at home or with the help of your mom or dad if it is in a hard to reach place. We will ask you if you are willing to let us use what is left of your sample for other studies.</p>
<p style='font-weight: bold;'>None of these samples will allow us to make a diagnosis of disease.</p>
<p>We intend to look at the different types of bacteria in your sample by getting out their DNA - we won't take out any of your DNA.</p>
<p style='font-weight: bold;'>How much time will each study procedure take, what is your total time commitment, and how long will the study last?</p>
<p>It should take you about 30 minutes to answer the survey questions and less than 15 minutes each time we ask you for a sample.</p>
<p style='font-weight: bold;'>What risks are associated with this study?</p>
<p>It is unlikely that there are risks to you from taking part in the study. The investigation staff have taken precautions to ensure that there is minimal risk of your private information leaking out. If the information about you were to become public, the implications are minimal because the tests cannot be used for diagnosis.</p>
<p>Because this is a research study, there may be some unknown risks that are currently unforeseeable. You will be informed of any significant new findings.</p>
<p style='font-weight: bold;'>What are the alternatives to participating in this study?</p>
<p>You do not have to participate. No harm would come to you.</p>
<p style='font-weight: bold;'>What benefits can be reasonably expected?</p>
<p>There is no direct benefit to you from taking part in this study. The investigator may learn more about the human microbiome in health and disease and benefit everyone.</p>
<p style='font-weight: bold;'>Can you choose to not participate or withdraw from the study without penalty or loss of benefits?</p>
<p>Participation in research is entirely voluntary. You may refuse to participate or withdraw at any time without upsetting the researchers. You will be told if any important new information is found during the course of this study that may affect your wanting to continue.</p>
<p style='font-weight: bold;'>Can you be withdrawn from the study without your assent?</p>
<p>You may be withdrawn from the study if you do not sign this form. You may also be withdrawn from the study if you do not follow the instructions given you by the study personnel.</p>
<p style='font-weight: bold;'>Will you be compensated for participating in this study?</p>
<p>You will not be financially compensated in this study.</p>
<p style='font-weight: bold;'>What if you are injured as a direct result of being in this study?</p>
<p>If you are injured or become ill as a direct result of this research study, you will be provided with medical care.</p>
<p style='font-weight: bold;'>What about your confidentiality?</p>
<p>Research records will be kept confidential to the extent allowed by law. All data about you that is entered on the survey is stored on a password-protected server located at the SDSC card access controlled facility at UCSD. The code key (that records which barcode or sample name was on your sample) is stored on a separate password-protected server that is accessible only to Professor Knight and Dr Gail Ackermann. All analysis is done on data that has no record of who you are. We will put the data into a place where other researchers can access it, but there will not be a way of determining who you are from what is in there.</p>
<p>Research records may be reviewed by the UCSD Institutional Review Board.</p>
<p style='font-weight: bold;'>Who can you call if you have questions?</p>
<p>Dr Rob Knight and/or Gail Ackermann has explained this study to you and answered your questions. If you have other questions or research-related problems, you may reach Dr Knight at 858-246-1194.</p>
<p>You may call the Human Research Protections Program Office at (858) 657-5100 to inquire about your rights as a research subject or to report research-related problems.</p>
<p style='font-weight: bold;'>Your Signature and Assent</p>
<p>You have received a copy of this assent document and a copy of the "Experimental Subject's Bill of Rights" to keep.</p>
<p>You agree to participate.</p>''',
'CONSENT_18':
'''<p align='center'>University of California, San Diego<br/>
Consent to Act as a Research Subject</p>
<p style='font-weight: bold;' align='center'>American Gut Project</p>
<p style='font-weight: bold;'>Who is conducting the study, why you have been asked to participate, how you were selected, and what is the approximate number of participants in the study?</p>
<p>Dr Rob Knight is conducting a research study to find out more about the trillions of bacteria and other organisms (called your microbiome) that live in and on your body. You have been asked to participate in this study because your microbiome is unique - not the same as anyone else on earth. There will be approximately 20,000 participants in the study from across the USA and from other countries around the world.</p>
<p style='font-weight: bold;'>Why is this study being done?</p>
<p>The purpose of this study is to more accurately assess the differences between people and whether these differences can be attributed to lifestyle, diet, body type, age or the presence of associated diseases. The results will be used to create a database of sequence data derived from bacterial DNA in various body sites (e.g. skin, mouth, gut), together with details about the participants supplying the samples, that can be used by other researchers when they need samples to compare to what they are studying, e.g. certain diseases where gut abnormalities are common.</p>
<p style='font-weight: bold;'>What will happen to you in this study?</p>
<p>You are being asked if you want to be in this study because you signed up for microbial analysis on the American Gut website. When you signed up we sent you a sample kit with instructions on how to log in to the website so that you can consent to the study formally.</p>
<p>The level of analysis available to you will depend on your contribution but all participants will have to consent to be a part of the study.
The following tests are available:</p>
<ol>
<li>Find out who is in your microbiome - which bacteria, and other microbes that are similar to bacteria (called archaea), are present in your sample ($99/swab kit);</li>
<li>You plus the world - This is two kits: one for you and one for someone from Africa, South America or Asia. Your support for the second sample will allow us to sequence more people from around the world as part of our ongoing research ($129/kit);</li>
<li>Microbes for two, three or four - which bacteria, and other microbes that are similar to bacteria (called archaea), are present in your sample and one, two or three other samples;</li>
<li>A week of feces - seven stool swab samples to be used any way you want, for example to track the effects of an antibiotic on your gut or the effect of foreign travel ($500/7-sample kit);</li>
<li>All in the Family - Where bacterial DNA is sliced up into fragments and then reassembled to see what genes are present (also called "shallow shotgun metagenomic analysis") for up to four fecal samples, with analysis of the pathways used by bacteria to signal other bacteria or within themselves;</li>
<li>Beyond Bacteria - Deeper shotgun metagenome and virome characterization (where bacterial DNA is sliced up into fragments and then reassembled to see what genes are present, making use of additional gene parts that can tell us if there are any associated viruses or virus products that "talk" with bacteria, fungi and parasites that may be present in the sample from your gut). Requires shipment of a whole stool sample (materials and return FedEx postage included) ($2500/kit);</li>
<li>Functional Feces - Additional characterization of gut samples over time (up to 7 stool samples), providing an analysis of the variability of functions over time. Here too the DNA is sliced up into fragments and then reassembled to see what genes are present (also called "shotgun metagenomic analysis").</li>
</ol>
<p>We will analyze all samples where the consent form and questionnaire are completed. The samples in the project (including yours) will be analyzed and published as a scientific article defining the range of diversity in the human microbiome. You will get a link to view, download and print a high-resolution certificate of your results, suitable for framing, and access to a more detailed list of the different organisms present in your sample (taxonomy summary).</p>
<p>We would like you to understand from the consent what we will do with your sample and what you will get in return.</p>
<p>We will ask you to complete an online questionnaire about you, your lifestyle and what you eat. We estimate that this should take no more than 30 minutes. You will then sample a part of your body (of interest to you) with a sterile Q-tip-like swab by rubbing the surface of your skin, rubbing the surface of your tongue or sampling your stool by inserting the tip of the swab into used toilet tissue. You can also sample other parts of your body - ear, nose, vagina, scalp, sole of foot. The swabs should be returned to us in the envelope provided using regular US mail service. DNA will be extracted from the sample and amplified by PCR (polymerase chain reaction) and then sequenced to see what bacteria are present and in what proportion in your sample. We estimate that it will take 2 months for you to learn the results.</p>
<p>For the Beyond Bacteria package you will submit a whole stool sample in a designated collection device on special ice packs (that reliably cool the sample to -20 degrees celsius/-4 degrees Fahrenheit) in a container that we will provide. The results for "Beyond Bacteria" and "All in the family" will take longer to analyze because more extensive analysis is provided. Results will be uploaded to your American Gut account when they are available. We are also asking you to consent to having your sample or the bacterial DNA from it to be used in future studies.</p>
<p style='font-weight: bold;'>Please Note: The sequencing is not for diagnostic purposes and does not target human DNA.</p>
<p style='font-weight: bold;'>How much time will each study procedure take, what is your total time commitment, and how long will the study last?</p>
<p>To complete the online questionnaire should take 30 minutes or less. Each sample you send can be obtained in 5 minutes or less. We expect the study to continue for 5 years but your results will be available to you before the end of the study (usually within 2 months of us receiving the sample). You can elect to sample yourself more than once. If your personal details change (e.g. address, or your health status) we request that you voluntarily re-enter your responses to the questionnaire.</p>
<p style='font-weight: bold;'>What risks are associated with this study?</p>
<p>The sampling techniques have been used for ~5 years with no reported side effects. We do not target the human DNA that may be in the sample so personal information about your genome will not be available to us. The investigation personnel have taken precautions to ensure that there is minimal risk of loss of confidentiality. Should confidentiality be compromised, the implications to you are minimal since the results are not diagnostic and have no implications for insurance companies that could compromise your insurability.</p>
<p>Because this is a research study, there may be some unknown risks that are currently unforeseeable. You will be informed of any significant new findings.</p>
<p style='font-weight: bold;'>What are the alternatives to participating in this study?</p>
<p>The study is entirely voluntary and not participating will have no consequence. There is no alternative test.</p>
<p style='font-weight: bold;'>What benefits can be reasonably expected?</p>
<p>There is no direct benefit to you for participating in this study. We believe that there may be natural curiosity to know what microbes are in your sample and how this compares to other people of the same gender and age. The investigator, however, will learn more about the human microbiome in health and disease and provide a valuable resource for other researchers in other studies. Your contribution to the project may be eligible as a tax-deduction. The receipt will be sent to you from the site that handles financial contributions.</p>
<p>We will analyze all samples where the consent form and questionnaire are completed. The samples in the project (including yours) will be analyzed and published as a scientific article. You will get a link to view, download and print a high-resolution certificate of your results, suitable for framing, and access to a more detailed taxa report.</p>
<p>The results from analysis of your sample/s cannot be used by you or your doctor to confirm a clinical diagnosis and we are not testing for infectious disease.</p>
<p style='font-weight: bold;'>Can you choose to not participate or withdraw from the study without penalty or loss of benefits?</p>
<p>Participation in research is entirely voluntary. You may refuse to participate or withdraw at any time without penalty or loss of benefits to which you are entitled. If you decide that you no longer wish to continue in this study, you will be requested to contact the American Gut Project helpline to inform us of your intent to withdraw. If your sample has not been processed you may request a refund which will be processed through the site where you contributed to the project.</p>
<p>You will be told if any important new information is found during the course of this study that may affect your wanting to continue.</p>
<p style='font-weight: bold;'>Can you be withdrawn from the study without your consent?</p>
<p>You may be withdrawn from the study if you do not complete the consent. You may also be withdrawn from the study if you do not follow the instructions given you by the study personnel.</p>
<p style='font-weight: bold;'>Will you be compensated for participating in this study?</p>
<p>You will not be financially compensated in this study.</p>
<p style='font-weight: bold;'>Are there any costs associated with participating in this study?</p>
<p>You will be asked to contribute money to the project commensurate with the investigation you request ($99 for one sample, $1500 for "All in the family" (shallow shotgun metagenomic sequencing) and $2500 for "Beyond Bacteria" (deeper shotgun metagenome and virome characterization of one sample, plus additional sequencing)). A receipt will be sent to you after you pay for the analysis you are requesting. These contributions are used to partially finance the project. Any additional funds required are provided from the funds UCSD has provided to Dr. Knight to set up his laboratory.</p>
<p style='font-weight: bold;'>What if you are injured as a direct result of being in this study?</p>
<p>If you are injured as a direct result of participation in this research, the University of California will provide any medical care you need to treat those injuries. The University will not provide any other form of compensation to you if you are injured. You may call the Human Research Protections Program Office at (858) 657-5100 for more information about this, to inquire about your rights as a research subject or to report research-related problems.</p>
<p style='font-weight: bold;'>What about your confidentiality?</p>
<p>Research records will be kept confidential to the extent allowed by law. All data about you that is entered on the web site is stored on a password-protected server located at the SDSC (San Diego Supercomputer Center) card access controlled facility at UCSD. Financial information from participants contributing to the project is not accessible to the researchers. The code key (that relates participant personal information to sample barcodes) is retained on a separate password-protected server that is accessible only to the PI, Co-I, sample coordinator and the database coders. All analysis is done on de-identified data and the data deposited in a public repository for use by other investigators is similarly de-identified. Research records may be reviewed by the UCSD Institutional Review Board.</p>
<p>You will provide information about yourself that could allow you to be identified if it was made public, e.g. name, age, birthdate, address. We have made every effort to ensure that you cannot be identified from the data you supply about yourself, while retaining critical information like gender and age without compromising your personal information or the data integrity.</p>
<p>We may need to report information about known or reasonably suspected incidents of abuse or neglect of a child, dependent adult or elder including physical, sexual, emotional, and financial abuse or neglect. The only way we could discover such abuse is if it is self-reported by the participant or the legal guardian, so this is not likely. If any investigator has or is given such information, he or she may report such information to the appropriate authorities.</p>
<p style='font-weight: bold;'>Who can you call if you have questions?</p>
<p>If you have questions or research-related problems, you may reach Rob Knight at 858-246-1194 or contact Elaine Wolfe at 858-246-1964.</p>
<p>You may call the Human Research Protections Program Office at (858) 657-5100 to inquire about your rights as a research subject or to report research-related problems.</p>
<p style='font-weight: bold;'>Your Signature and Consent</p>
<p>You have received a copy of this consent document and a copy of the "Experimental Subject's Bill of Rights" to keep.</p>
<p>You agree to participate.</p>''',
'CONSENT_YOUR_CHILD': '''<p align='center'>University of California, San Diego<br/>
Parent Consent for Child to Act as a Research Subject<br/></p>
<p align='center' style='font-weight: bold;'>American Gut Project</p>
<p style='font-weight: bold;'>Who is conducting the study, why your child has been asked to participate, how your child was selected, and what is the approximate number of participants in the study?</p>
<p>Dr. Rob Knight is conducting a research study to find out more about the trillions of bacteria and other organisms (called the microbiome) that live in and on the body. You are volunteering your child for this study because you want to know more about the microbiome of your child. Children, like all humans, have a unique microbiome, and including them in the study will help elucidate the development of the microbiome. There will be approximately 20,000 participants in the study from across the USA and from other countries around the world.</p>
<p style='font-weight: bold;'>Why is this study being done?</p>
<p>The purpose of this study is to more accurately assess the differences between people and whether these differences can be attributed to lifestyle, diet, body type, age or the presence of associated diseases. The results will be used to create a database of sequence data derived from bacterial DNA in various body sites (e.g. skin, mouth, gut), together with details about the child participant supplying the sample, that can be used by other researchers when they need samples to compare to what they are studying, e.g. certain diseases where gut abnormalities are common.</p>
<p style='font-weight: bold;'>What will happen to your child in this study and which procedures are standard of care and which are experimental?</p>
<p>You are being asked if you want your child to be in this study because you signed up for microbial testing on the American Gut website. When you signed up we sent you a sample kit with instructions on how to log in to the website so that you can consent to the study formally.</p>
<p>The level of analysis available to you will depend on your contribution but all participants will have to consent to be a part of the study.
The following tests are available:</p>
<ol>
<li>Find out who is in your microbiome - which bacteria, and other microbes that are similar to bacteria (called archaea), are present in your child's sample ($99/swab kit);</li>
<li>You plus the world - This is two kits: one for your child and one for someone from Africa, South America or Asia. Your support for the second sample will allow us to sequence more people from around the world as part of our ongoing research ($129/kit);</li>
<li>Microbes for two, three or four - which bacteria, and other microbes that are similar to bacteria (called archaea), are present in your child's sample and one, two or three other samples;</li>
<li>A week of feces - seven stool swab samples to be used any way you want, for example to track the effects of an antibiotic on your gut or the effect of foreign travel ($500/7-sample kit);</li>
<li>All in the Family - Where bacterial DNA is sliced up into fragments and then reassembled to see what genes are present (also called "shallow shotgun metagenomic analysis") for up to four fecal samples, with analysis of the pathways used by bacteria to signal other bacteria or within themselves;</li>
<li>Beyond Bacteria - Deeper shotgun metagenome and virome characterization (where bacterial DNA is sliced up into fragments and then reassembled to see what genes are present, making use of additional gene parts that can tell us if there are any associated viruses or virus products that "talk" with bacteria, fungi and parasites that may be present in the sample from your gut). Requires shipment of a whole stool sample (materials and return FedEx postage included) ($2500/kit);</li>
<li>Functional Feces - Additional characterization of gut samples over time (up to 7 stool samples), providing an analysis of the variability of functions over time. Here too the DNA is sliced up into fragments and then reassembled to see what genes are present (also called "shotgun metagenomic analysis").</li>
</ol>
<p>We will analyze all samples where the consent form and questionnaire are completed. The samples in the project (including your child's) will be analyzed and published as a scientific article defining the range of diversity in the human microbiome. You will get a link to view, download and print a high-resolution certificate of your results, suitable for framing, and access to a more detailed list of the different organisms present in your sample (taxonomy summary).</p>
<p>We would like you to understand from the consent what we will do with your child's sample and what you will get in return.</p>
<p>We will ask you to complete an online questionnaire about your child's lifestyle and what he/she eats. We estimate that this should take no more than 30 minutes. You will then sample a part of your child's body (of interest to you) with a sterile Q-tip-like swab by rubbing the surface of your child's skin, rubbing the surface of his/her tongue or sampling his/her stool by inserting the tip of the swab into used toilet tissue. You can also sample other parts of her/his body - ear, nose, vagina, scalp, sole of foot. The swabs should be returned to us in the envelope provided using regular US mail service. DNA will be extracted from the sample and amplified by PCR (polymerase chain reaction) and then sequenced to see what bacteria are present and in what proportion in your child's sample. We estimate that it will take 2 months for you to learn the results.</p>
<p>For the Beyond Bacteria package you will submit a whole stool sample in a designated collection device on special ice packs (that reliably cool the sample to -20 degrees celsius/-4 degrees Fahrenheit) packed in a container that we will provide. The results for "Beyond Bacteria" and "All in the family" will take longer because more extensive analysis is provided. Results will be uploaded to your American Gut account when they are available. We are also asking you to consent to having your child's sample or the bacterial DNA from it to be used in future studies.</p>
<p style='font-weight: bold;'>Please Note: The sequencing is not for diagnostic purposes.</p>
<p style='font-weight: bold;'>How much time will each study procedure take, what is your child's total time commitment, and how long will the study last?</p>
<p>To complete the online questionnaire should take 30 minutes or less. Each sample you send can be obtained in 5 minutes or less. We expect the study to continue for 5 years but the results will be available to you before the end of the study (usually within 2 months of us receiving the sample). You can choose to sample your child more than once. If your child's personal details change (e.g. address, or health status) we request that you voluntarily re-enter that information into the questionnaire.</p>
<p style='font-weight: bold;'>What risks are associated with this study?</p>
<p>The sampling techniques have been used for ~5 years with no reported side effects. We do not target the human DNA that may be in the sample so personal information about your child's genome will not be available. The investigation personnel have taken precautions to ensure that there is minimal risk of loss of confidentiality. Should confidentiality be compromised, the implications to your child are minimal since the results are not diagnostic and have no implications for insurance companies that could compromise your child's insurability.</p>
<p>Because this is a research study, there may be some unknown risks that are currently unforeseeable. You will be informed of any significant new findings.</p>
<p style='font-weight: bold;'>What are the alternatives to participating in this study?</p>
<p>The study is entirely voluntary and not allowing your child to participate will have no consequence. There is no alternative test.</p>
<p style='font-weight: bold;'>What benefits can be reasonably expected?</p>
<p>There is no direct benefit to your child for participating in this study. The investigator, however, may learn more about the human microbiome in health and disease and provide a valuable resource for other researchers in other studies.</p>
<p style='font-weight: bold;'>Can you choose not to have your child participate or withdraw from the study without penalty or loss of benefits?</p>
<p>There is no direct benefit to you or your child for participating in this study. We believe that there may be natural curiosity to know what bacteria are in your sample and how this compares to other people of the same gender and age. The investigator, however, will learn more about the human microbiome in health and disease and provide a valuable resource for other researchers in other studies. Your contribution to the project may be eligible as a tax-deduction. The receipt will be sent to you from the site that handles financial contributions.</p>
<p>We will analyze all samples where the consent form and questionnaire are completed. The samples in the project (including your child's) will be analyzed and published as a scientific article. You will get a link to view, download and print a high-resolution certificate of your results, suitable for framing, and access to a more detailed taxa report.</p>
<p>The results from analysis of your sample/s cannot be used by you or your doctor to confirm a clinical diagnosis and we are not testing for infectious disease.</p>
<p style='font-weight: bold;'>Can your child be withdrawn from the study without your consent?</p>
<p>Participation in research is entirely voluntary. You may refuse to participate or withdraw your child from the study at any time, without penalty or loss of benefits to which you are entitled. If you decide that you no longer wish to continue in this study, you will be requested to contact the American Gut Project helpline to inform us of your intent to withdraw. If your sample has not been processed you may request a refund which will be processed through the site where you contributed to the project.</p>
<p>You will be told if any important new information is found during the course of this study that may affect your wanting to continue.</p>
<p style='font-weight: bold;'>Will you be compensated for participating in this study?</p>
<p>You will not be financially compensated in this study.</p>
<p style='font-weight: bold;'>Are there any costs associated with participating in this study?</p>
<p>You will be asked to contribute money to the project commensurate with the investigation you request ($99 for one sample, $1500 for "All in the family" (shallow shotgun metagenomic sequencing) and $2500 for "Beyond Bacteria" (deeper shotgun metagenome and virome characterization of one sample, plus additional sequencing)). A receipt will be sent to you after you pay for the analysis you are requesting. These contributions are used to partially finance the project. Any additional funds required are provided from the funds UCSD has provided to Dr. Knight to set up his laboratory.</p>
<p style='font-weight: bold;'>What if your child is injured as a direct result of being in this study?</p>
<p>If your child is injured as a direct result of participation in this research, the University of California will provide any medical care you need to treat those injuries. The University will not provide any other form of compensation to you if your child is injured. You or your child may call the Human Research Protections Program Office at (858) 657-5100 for more information about this, to inquire about your rights as a research subject or to report research-related problems.</p>
<p style='font-weight: bold;'>What about your confidentiality?</p>
<p>Research records will be kept confidential to the extent allowed by law. All data about your child that is entered on the web site is stored on a password-protected server located at the SDSC (San Diego Supercomputer Center) card access controlled facility at UCSD. Financial information from participants contributing to the project is not accessible to the researchers. The code key (that relates participant personal information to sample barcodes) is retained on a separate password-protected server that is accessible only to the PI, Co-I, sample coordinator and the database coders. All analysis is done on de-identified data and the data deposited in a public repository for use by other investigators is similarly de-identified. Research records may be reviewed by the UCSD Institutional Review Board.</p>
<p>You will provide information about yourself that could allow you to be identified if it was made public, e.g. name, age, birthdate, address. We have made every effort to ensure that you cannot be identified from the data you supply about yourself, while retaining critical information like gender and age without compromising your personal information or the data integrity.</p>
<p>We may need to report information about known or reasonably suspected incidents of abuse or neglect of a child, dependent adult or elder including physical, sexual, emotional, and financial abuse or neglect. The only way we could discover such abuse is if it is self-reported by the participant or the legal guardian, so this is not likely. If any investigator has or is given such information, he or she may report such information to the appropriate authorities.</p>
<p style='font-weight: bold;'>Who can you call if you have questions?</p>
<p>If you have questions or research-related problems, you may reach Rob Knight at 858-246-1194 or contact Elaine Wolfe at 858-246-1964.</p>
<p>You may call the Human Research Protections Program Office at (858) 657-5100 to inquire about your rights as a research subject or to report research-related problems.</p>
<p style='font-weight: bold;'>Your Signature and Consent</p>
<p>You have received a copy of this consent document and a copy of the "Experimental Subject's Bill of Rights" to keep.</p>
<p>You agree to allow your child to participate.</p>''',
'BILL_OF_RIGHTS': '''Experimental Subject's Bill of Rights''',
'TEXT_I_HAVE_READ_1': 'I have read (or someone has read to me) this form. I am aware that I am being asked to be in a research study. I voluntarily agree to be in this study.',
'TEXT_I_HAVE_READ_SIMPLIFIED': 'Yes, you will be in this research study.',
'PERSON_ATTAINING_ASSENT': 'Signature Of Person Obtaining Assent',
'TEXT_ASSENT_WITNESS': 'In my judgment, the participant is voluntarily and knowingly giving assent and possesses the legal capacity to give assent to participate in the study.',
'OBTAINER_NAME': 'Name of person obtaining assent',
'TEXT_I_HAVE_READ_PARENT': 'I have read (or someone has read to me) this form. I am aware that my child is being asked to be in a research study. I voluntarily agree for my child to be in this study.',
'PARTICIPANT_NAME': 'Name of participant',
'PARTICIPANT_EMAIL': 'Email of participant',
'PARTICIPANT_PARENT_1': 'Parent/Guardian name',
'PARTICIPANT_PARENT_2': 'Parent/Guardian name of second parent',
'PARTICIPANT_DECEASED_PARENTS': 'One parent/guardian is deceased or unable to consent.',
'DATE_SIGNED': 'Date Signed'
}
_MAP = {
'MAP_TITLE': 'Map Key',
'MAP_PARTICIPANT': ' Participant',
'MAP_KIT': ' Kit Verified',
'MAP_SAMPLE': ' Sample(s) Logged',
}
_FORGOT_PASSWORD = {'ENTER_ID_EMAIL': 'Enter your Kit ID and email',
'KIT_ID': 'Kit ID:',
'EMAIL': 'E-mail',
'EMAIL_RESET_PASSWORD': 'You will receive an email shortly with instructions to reset your password. Please check your email because you need to reset your password within two hours.',
'EMAIL_FAILED': '<p>There was a problem sending you the password reset code. Please contact us directly at <a href=\"mailto:%(help_email)s\" target=\"_blank\">%(help_email)s</a>.</p><p>Email contained: </p>' % {'help_email': media_locale['HELP_EMAIL']},
'NO_RECORD': '<p style="color:red;">This information does not match our records</p><p>Please email <a href="mailto:%(help_email)s">directly</a> for further assistance<p>' % {'help_email': media_locale['HELP_EMAIL']},
'SEND_EMAIL': 'Send email'}
_ERROR = {
'ERROR_OCCURED': 'AN ERROR HAS OCCURRED!',
'ERROR_CONTACT': "The error has been logged and we will look into it. Please go back to the main page."
}
_RETREIVE_KITID = {
'UNKNOWN_EMAIL': 'This email address is not in our system',
'ENTER_EMAIL': 'Please Enter Your Email',
'SEND_EMAIL': 'Send Kit ID Email',
'EMAIL_SUCCESS': 'Your kit ID has been emailed to you. Please check your email.',
'EMAIL_CANTSEND': 'Mail can only be sent from the microbio.me domain.',
'EMAIL_EXCEPTION': 'There was a problem sending you the kit ID. Please contact us directly at <a href=\"mailto:%(help_email)s\">%(help_email)s</a>.' % {'help_email': media_locale['HELP_EMAIL']},
'EMAIL_PROMPT': 'Email:'
}
_ADD_SAMPLE = {
'NEW_SAMPLE_TITLE': 'Log a new sample for',
'NEW_SAMPLE_DESCRIPTION_1': 'Choose the barcode from your kit that corresponds to the sample you are logging.',
'NEW_SAMPLE_DESCRIPTION_2': 'It is very important that the sample barcode matches <strong>exactly</strong> for downstream analysis steps.',
'SITE_SAMPLED': 'Site Sampled',
'DATE': 'Date',
'DATE_EXAMPLE': ' mm/dd/yyyy (Example: 05/07/2013)',
'TIME': 'Time',
'TIME_EXAMPLE': ' hh:mm AM/PM (Example: 04:35 PM)',
'NOTES': 'Additional Notes (optional)',
}
_REGISTER_USER = {
'ENTER_KIT_ID': "Please enter your kit ID",
'ENTER_PASSWORD': 'Please enter your kit password',
'ENTER_NAME': 'Please enter your name',
'ENTER_EMAIL': 'Please enter your email',
'REQUIRED_EMAIL': 'You must supply a valid email',
'ENTER_ADDRESS': 'Please enter your address',
'ENTER_CITY': 'Please enter your city',
'ENTER_STATE': 'Please enter your state',
'ENTER_ZIP': 'Please enter your zip',
'ENTER_COUNTRY': 'Please enter your country',
'REQUIRED_ZIP': 'Your zip must be 10 or fewer characters',
'EMAIL': 'Email',
'NAME': 'Name',
'ADDRESS': 'Address',
'CITY': 'City',
'STATE': 'State',
'ZIP': 'Zip',
'COUNTRY': 'Country',
'PASSWORD': 'Password',
'KIT_ID': 'Kit ID',
'SUBMIT': 'Submit My Information'
}
_ADDENDUM = {
'TITLE': 'American Gut Addendum',
'INTRO': 'We\'d like to note that in general these data allow you to understand how similar or different you are to other people in terms of the bacterial composition of the sample you sent. The information about the microbes is at as fine a level of taxonomic resolution as we were able to achieve with our sequencing methods, and varies for different groups of microbes. Currently, we cannot tell you what it means if you have more or less of certain bacteria than other people. Gut microbiome research is still new, and we have a lot to learn. Your participation in the American Gut Project will allow us to learn more, and we hope to update you with new findings as they emerge.',
'LEARN_MORE': 'Learn more about your certificate by clicking on a plot or table',
'MOD01ALT': 'Your American Gut Sample',
'MOD01bALT': 'Michael Pollan',
'MOD02ALT': 'What\'s in your sample?',
'MOD11ALT': 'Taxonomy Bar Charts',
'MOD12ALT': 'Major Phyla',
'MOD13ALT': 'Abundant Microbes',
'MOD14ALT': 'Enriched Microbes',
'MOD15ALT': 'Rare Microbes',
'MOD03ALT': 'How do your gut microbes compare to others?',
'MOD08ALT': 'PCoA of BodySites with HMP',
'MOD09ALT': 'PCoA of diets and age',
'MOD10ALT': 'PCoA of American Gut Data',
'RESULTS_CAPTION': 'Your certificate is designed to help you determine what was found in your sample, and how you compare to other people. Click on a graph or table to learn more.',
'SAMPLE_TITLE': 'What\'s in your %(PROJECT_TITLE)s sample?' % media_locale,
'TAXONOMY': 'Taxonomy',
'TAXONOMY_INTRO': 'Taxonomy is a system scientists use to describe all life on the planet. Taxonomy is commonly referred to as an organism\'s scientific name. This name allows us to understand how closely related two organisms are to each other. There are seven major levels of taxonomy that go from less specific to more specific. The phylum level represents a very broad range of organisms that have <strong>evolved over hundreds of millions of years</strong> whereas the species level represents only a small subset of them that are <strong>much more closely related</strong>. Typically, names at the genus and species levels are written in <em>italics</em> or are <u>underlined</u> (in our tables, they are <em>italicized</em>). For instance, here is the list of taxonomic levels and names for humans and chimpanzees:',
'HUMAN_TAXONOMY': 'Human',
'HUMAN_TAXONOMY_KINGDOM': 'Kingdom: Animalia',
'HUMAN_TAXONOMY_PHYLUM': 'Phylum: Chordata',
'HUMAN_TAXONOMY_CLASS': 'Class: Mammalia',
'HUMAN_TAXONOMY_ORDER': 'Order: Primates',
'HUMAN_TAXONOMY_FAMILY': 'Family: Hominidae',
'HUMAN_TAXONOMY_GENUS': 'Genus: <em>Homo</em>',
'HUMAN_TAXONOMY_SPECIES': 'Species: <em>sapiens</em>',
'CHIMP_TAXONOMY': 'Chimpanzee',
'CHIMP_TAXONOMY_KINGDOM': 'Kingdom: Animalia',
'CHIMP_TAXONOMY_PHYLUM': 'Phylum: Chordata',
'CHIMP_TAXONOMY_CLASS': 'Class: Mammalia',
'CHIMP_TAXONOMY_ORDER': 'Order: Primates',
'CHIMP_TAXONOMY_FAMILY': 'Family: Hominidae',
'CHIMP_TAXONOMY_GENUS': 'Genus: <em>Pan</em>',
'CHIMP_TAXONOMY_SPECIES': 'Species: <em>troglodytes</em>',
'LACTO_TAXONOMY': 'Here is the same list for a common yogurt bacterium (<em>Lactobacillus delbrueckii</em>):',
'LACTO_TAXONOMY_KINGDOM': 'Bacteria',
'LACTO_TAXONOMY_PHYLUM': 'Firmicutes',
'LACTO_TAXONOMY_CLASS': 'Bacilli',
'LACTO_TAXONOMY_ORDER': 'Lactobacillales',
'LACTO_TAXONOMY_FAMILY': 'Lactobacillaceae',
'LACTO_TAXONOMY_GENUS': '<em>Lactobacillus</em>',
'LACTO_TAXONOMY_SPECIES': '<em>delbrueckii</em>',
'BACTAX_LINK': 'For more information on bacterial taxonomy, please refer to the following link: ',
'TOP': 'Back to the top',
'TAX_BARCHART': 'Taxonomy Bar Chart',
'TAX_BARCHART_TEXT_1': 'The taxonomy bar chart shows the abundances of bacterial types at the phylum level in your sample and compares them to those in other samples. Specifically, it shows you what percentage of all your bacteria belonged to each phylum. We also calculated the average percentage of each bacterial phylum across all samples, across samples from people with a similar diet to the one you reported, across samples from people of the same gender as you, across samples from everyone with a similar BMI to you, across samples from everyone of the same age as you, and for one specific person, Michael Pollan. You can compare the percentage of bacterial phyla in your sample (first bar) to all of these values to get an idea of how similar or different you are.',
'TAX_BARCHART_TEXT_2': '<strong>Firmicutes and Bacteroidetes are the two most abundant bacterial phyla in the human gut, but others are also present.</strong> Please see <a href = "#phyla">Major Bacterial Phyla</a> below for basic descriptions of these phyla.',
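# An illustrative sketch (not the project's plotting code) of the comparison the
# two notes above describe: averaging phylum-level percentages within groups such
# as "similar diet" or "same gender". The `phylum_pct` and `metadata` DataFrames
# and their column names are assumptions for the example.
#     >>> import pandas as pd
#     >>> merged = phylum_pct.join(metadata[['diet', 'gender']])
#     >>> merged.groupby('diet')[phylum_pct.columns].mean()    # average % per diet group
#     >>> merged.groupby('gender')[phylum_pct.columns].mean()  # average % per gender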
'ABUNDANT': 'Abundant Microbes',
'YOUR_ABUNDANT': 'Your most abundant microbes:',
'YOUR_ABUNDANT_TABLE_HEADER': '<th>Taxonomy</th><th>Sample</th>',
'OBSERVED_TAXON_1': '<td class = \'taxa\'>Family Prevotella</td><td class = \'row\'>24.9%</td>',
'OBSERVED_TAXON_2': '<td class = \'taxa\'>Family Ruminococcaceae</td><td class = \'row\'>13.4%</td>',
'OBSERVED_TAXON_3': '<td class = \'taxa\'>Family Lachnospiraceae</td><td class = \'row\'>10.1%</td>',
'OBSERVED_TAXON_4': '<td class = \'taxa\'>Genus <em>Bacteroides</em></td><td class = \'row\'>8.1%</td>',
'TAX_BARCHART_EXP': 'The first table shows the four most abundant groups of microbes in your sample. Although you had other bacteria, these are the ones that you have the most of. The percentages on the right (under "Sample") tell you what percent of all of your bacteria belong to these taxa.',
'ENRICHED': 'Enriched Microbes',
'YOUR_ENRICHED': 'Your most enriched microbes:',
'YOUR_ENRICHED_TABLE_HEADER': '<th>Taxonomy</th><th>Sample</th><th>Population</th><th>Fold</th>',
'YOUR_ENRICHED_1': '<td class = \'taxa\'>Genus <em>Clostridium</em></td><td class = \'row\'>2.5%</td><td class = \'row\'>0.3%</td><td class = \'row\'>7x</td>',
'YOUR_ENRICHED_2': '<td class = \'taxa\'>Genus <em>Finegoldia</em></td><td class = \'row\'>0.7%</td><td class = \'row\'>0.0%</td><td class = \'row\'>17x</td>',
'YOUR_ENRICHED_3': '<td class = \'taxa\'>Genus <em>Prevotella</em></td><td class = \'row\'>24.9%</td><td class = \'row\'>2.6%</td><td class = \'row\'>9x</td>',
'YOUR_ENRICHED_4': '<td class = \'taxa\'>Genus <em>Collinsella</em></td><td class = \'row\'>0.9%</td><td class = \'row\'>0.1%</td><td class = \'row\'>8x</td>',
'ENRICHED_EXP_1': 'The second table shows four microbes that you had more of compared to other people. It is likely that other participants also have these microbes in their sample, but we found substantially higher abundances of them in your sample relative to others. The percentages on the right tell you how many of your total bacteria (under "Sample") or of the total bacteria in an average person\'s sample (under "Population") belong to these taxa. Since you have more of these bacteria than most other people, the percentage under "Sample" should be higher than the percentage under "Population".',
'ENRICHED_EXP_2': 'The fold change tells you how many more of these bacteria you have than the average participant. For example, if you have 20% Bacteria A and the average person in the population has 10% Bacteria A, you have twice as many Bacteria A. This would be a twofold (2x) difference. Please note that because the percentages we report on this sheet are rounded (e.g., 0.05% rounded to 0.1%), and your fold differences are calculated from values that are not rounded, the fold differences you see may differ slightly from what you would calculate based on the numbers you see.',
'RARE': 'Rare Taxa',
'RARE_TEXT_1': 'This sample included the following rare taxa: Genus <em>Varibaculum</em>, Genus <em>Neisseria</em>, Genus <em>Campylobacter</em>, Unclassified Order ML615J-28.',
'RARE_TEXT_2': 'This line shows four microbes that you have that are not commonly found in the type of sample you provided. Some other people may have them, but most people do not.',
'YOUR_COMPARE': 'How do your gut microbes compare to others?',
'COMPARE_TEXT_1': 'Here, we present three Principal Coordinates plots. Each point on these plots represents the bacterial composition of one sample from one person. We take all of the information about the abundances of all the bacteria in each sample and compare them to each other using this type of plot. When two points are very close to each other, it means that the types of bacteria in those two samples are very similar. Points that are farther apart represent samples that are less similar to each other. The axes mean nothing in this context. It doesn\'t matter how high or low a point is on the plot. The only thing that matters is how close it is to other points.',
'COMPARE_TEXT_2': 'The large point represents your sample on each plot. This allows you to see how similar (close to) or different (far from) your sample is from others.',
'DIFFERENT_BODY_SITES': 'Different Body Sites',
'DIFFERENT_BODY_SITES_ALT': 'PCoA by body site for AGP and HMP',
'DIFFERENT_BODY_SITES_TEXT': 'This plot lets you compare your sample to samples collected in other microbiome projects from several body sites. The color of each point tells you which project and body site the sample came from. HMP refers to the <a href = \'http://www.hmpdacc.org\'>Human Microbiome Project</a>, funded by the National Institutes of Health. You can see how your sample compared to fecal, oral, and skin samples from the Human Microbiome Project, as well as to fecal, oral, and skin samples from the American Gut Project, the Global Gut Project, and the Personal Genome Project. These samples have been combined in any category not labeled "HMP". The oval around each group of points shows you where an average sample from each project and body site should fall on the plot. These sometimes make it easier to see the patterns all the clusters of points make.',
'DIFFERENT_AGES_POPS': 'Different Ages and Populations',
'DIFFERENT_AGES_POPS_ALT': 'PCoA of international populations colored by age',
'DIFFERENT_AGES_POPS_TEXT': 'This plot lets you compare your sample to other fecal microbiome samples according to age and place of origin. The color of each point indicates the age of the person the sample was collected from, with red being the youngest and purple being the oldest. Also, on this plot, the ovals show where in the world each sample came from. The red oval shows you the area where an average sample from a Western country should fall. The yellow oval shows you where an average sample from an Amerindian population in Venezuela should fall. The blue oval shows you where an average sample from Malawi should fall. These data are from <a href = \'http://www.nature.com/nature/journal/v486/n7402/abs/nature11053.html\'>Yatsunenko et al. 2012</a>. We used these populations as a comparison to your sample since a large number of people with diverse ages were sampled in these populations. We have fewer data from other populations in other parts of the world.',
'AG_POPULATION': 'The American Gut Population',
'AG_POPULATION_ALT': 'PCoA of American Gut population colored by Firmicutes',
'AG_POPULATION_TEXT': 'This plot lets you compare your sample to other fecal microbiome samples we collected from American Gut participants. The color indicates the relative abundance of Firmicutes bacteria each sample had with red being the lowest and purple being the highest. If you had a lot of Firmicutes bacteria, then your sample should be purple, and you can look for other purple samples to see how similar your whole bacterial community is to other people with high amounts of Firmicutes. As in the other plots, the location of the point along the axes means nothing. Only its relative position compared to the other points is meaningful.',
'MAJOR_PHYLA': 'Major Bacterial Phyla',
'MAJOR_PHYLA_FIRMICUTES_HEADER': 'Firmicutes',
'MAJOR_PHYLA_FIRMICUTES_TEXT': 'A phylum of bacteria with generally Gram-positive (retain crystal violet dye) staining cell wall structure. The name is derived from Latin <em>firmus</em> for strong and <em>cutis</em> for skin. The cells are in the form of spheres called cocci (singular coccus) or rods called bacilli (singular bacillus). Firmicutes encompass bacteria that can be found in many different environments ranging from soil to wine to your gut. There are currently more than 274 genera representing 7 different classes of which Clostridia (anaerobes - no oxygen) and Bacilli (obligate or facultative aerobes) are the most significant. Both classes are predominantly saprophytic (getting nourishment from dead or decaying organic matter) playing an important role in the decomposition and nutrient mineralization processes, but also contain a few human pathogens (e.g. <em>Clostridium tetani</em> or <em>Bacillus anthracis</em>).',
'MAJOR_PHYLA_BACTEROIDETES_HEADER': 'Bacteroidetes',
'MAJOR_PHYLA_BACTEROIDETES_TEXT': 'A phylum of Gram-negative bacteria, rod-shaped, present in all sorts of environments such as soil, sediments, and fresh and marine waters. Most are saprophytic and involved in carbon cycling. Often abundant in nutrient-rich habitats and so they are a major component of animal guts where they can act as degraders of complex carbohydrates and proteins but also as pathogens. Their representatives are organized within 4 major classes among which the genus <em>Bacteroides</em> in the class of Bacteroidia is the most prevalent and the most studied. Bacteroidetes together with Firmicutes make up the majority of gut bacteria. The ratio of these two types of bacteria (specifically the dominance of Firmicutes over Bacteroidetes) may be linked to obesity.',
'MAJOR_PHYLA_PROTEOBACTERIA_HEADER': 'Proteobacteria',
'MAJOR_PHYLA_PROTEOBACTERIA_TEXT': 'A phylum of Gram-negative bacteria. They are named after the Greek god Proteus to illustrate their variety of forms. They are organized in 6 recognized classes and represent all types of metabolisms ranging from heterotrophic to photosynthetic to chemoautotrophic. They include many well-known pathogens (e.g., <em>Escherichia</em>, <em>Helicobacter</em>, <em>Salmonella</em>, <em>Vibrio</em>) as well as free-living types that can fix nitrogen (convert nitrogen present in the atmosphere into ammonia, a form of nitrogen available for plants\' uptake).',
'MAJOR_PHYLA_ACTINOBACTERIA_HEADER': 'Actinobacteria',
'MAJOR_PHYLA_ACTINOBACTERIA_TEXT': 'A phylum of Gram-positive bacteria both terrestrial and aquatic. They are mostly recognized as excellent decomposers of resilient organic compounds such as cellulose or chitin. Although some can be plant and animal pathogens, others are more known as producers of antibiotics (e.g. Streptomyces). In their body form, many resemble fungi by forming mycelial-like filaments.',
'MAJOR_PHYLA_VERRUCOMICROBIA_HEADER': 'Verrucomicrobia',
'MAJOR_PHYLA_VERRUCOMICROBIA_TEXT': 'A relatively new phylum with only a handful of described species. Although not the most abundant, they seem to be always present in soil, aquatic environments, and feces. Most likely they are involved in the decomposition of organic matter, with no known pathogens. While some may be autotrophs, others can be internal symbionts of microscopic eukaryotes such as protists or nematodes. Their name is derived from a wart-like appearance (<em>verruca</em> means wart) but they do not cause warts.',
'MAJOR_PHYLA_TENERICUTES_HEADER': 'Tenericutes',
'MAJOR_PHYLA_TENERICUTES_TEXT': 'A phylum of Gram-negative bacteria without a cell wall (<em>tener</em> - soft, <em>cutis</em> - skin) which are organized in a single class. Nutritionally, they represent variable pathways ranging from aerobic and anaerobic fermenters to commensals to strict pathogens of vertebrates (e.g., fish, cattle, wildlife). Among the best studied are the Mycoplasmas, whose colonies have a fried egg-like shape; <em>Mycoplasma pneumoniae</em> is one of the best known human pathogens, causing pneumonia, bronchitis, and other respiratory conditions.',
'MAJOR_PHYLA_CYANOBACTERIA_HEADER': 'Cyanobacteria',
'MAJOR_PHYLA_CYANOBACTERIA_TEXT': 'A phylum of photosynthetic (plant-like) bacteria. The name comes from their blue pigment (in Greek <em>kyanos</em> - blue). They can grow as single cells or form filamentous colonies. They are extremely successful in every imaginable environment including places where other organisms are extremely limited like hot springs or cold Antarctic bare rocks. Through their incredible diversity and abundance, they contribute significantly to the global cycle of oxygen.',
'MAJOR_PHYLA_FUSOBACTERIA_HEADER': 'Fusobacteria',
'MAJOR_PHYLA_FUSOBACTERIA_TEXT': 'A phylum of rod-shaped Gram-negative bacteria. Known primarily as fermentative species but some can be pathogens. Can occur in anoxic (no oxygen) sediments as well as intestinal habitats of animals including humans.',
'CONTRIB': 'Contributors',
'SUPPORTERS': 'Supporters',
'SPONSORS': 'Sponsors',
'COLLABORATORS': 'Collaborators'
}
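# Illustrative sketch (not part of the original module): the fold-change
# calculation described in 'ENRICHED_EXP_2' above. Fold changes are computed
# from unrounded relative abundances, which is why the rounded percentages
# shown in the tables can imply a slightly different fold than the one
# reported. The helper name below is hypothetical.
def _example_fold_change(sample_fraction, population_fraction):
    """Return how many times more abundant a taxon is in this sample."""
    if population_fraction == 0:
        # Taxon is absent from the population average; the fold is undefined.
        return float('inf')
    return sample_fraction / population_fraction
# e.g. _example_fold_change(0.249, 0.026) -> ~9.6; the certificate reports
# "9x" for this taxon because its fold is computed from unrounded abundances.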
_PORTAL = {
'GREETING': 'Hi %(user_name)s! Please follow the steps below.',
'VERIFY_TAB': 'Verify Your Kit',
'ADD_SOURCE_TAB': 'Add Source <br>Survey',
'TAKE_SAMPLE_TAB': 'Take a Sample',
'LOG_SAMPLE_TAB': 'Log a Sample',
'MAIL_TAB': 'Mail Samples <br>to Us',
'SEQ_TAB': 'Sequencing &<br>Results',
'VERIFICATION_HEADER_1': 'Verification',
'VERIFICATION_TEXT_1': 'We ask you to verify that you received the correct sample tubes and kit. Using a <strong>Verification Code</strong> helps us ensure that you receive the correct barcodes and Credentials Sheet.',
'VERIFICATION_TEXT_2': 'Your <strong>Verification Code</strong> will be sent to you via email to the address that you entered when you made your contribution; if you made an anonymous contribution, please <a href="%(sitebase)s/authed/help_request/">contact us directly</a>.' % {'sitebase': media_locale['SITEBASE']},
'VERIFICATION_TEXT_3': 'If you cannot find your <strong>Verification Code</strong>, please make sure to check your spam folder. If you still cannot find the code, please <a href="%(sitebase)s/authed/help_request/">contact us</a>.' % {'sitebase': media_locale['SITEBASE']},
'RESEND_VERIFICATION': 'Resend verification code',
'VERIFICATION_HEADER_2': 'Verify your identity and kit barcode(s)',
'VERIFICATION_CODE_PROMPT': 'Please enter the verification code sent to your email address <a href="#" class="help" title="If you did not receive a verification code in your email from American Gut, please check your spam folder. If you still cannot find it, contact %(help_email)s">(?)</a>' % {"help_email": media_locale["HELP_EMAIL"]},
'VERIFICATION_CODE_ERROR': 'The kit verification code you entered does not match our records. Please double-check the code you entered. If you continue to experience difficulties, please <a href=/authed/help_request/>contact us</a>.',
'VERIFY_BARCODES': 'Please verify that the barcode(s) you received in the mail match the barcode(s) here',
'VERIFY_BARCODES_POPUP': 'The barcode you need to verify is located on the outside of your sample tube.',
'SAMPLE_SOURCE_HEADER_1': 'Sample Source',
'SAMPLE_SOURCE_TEXT_1': 'There are three different sample sources that you can choose from for the %(project)s. The sources are human, animal and environmental. The buttons below will allow you to add a new sample source.',
'SAMPLE_SOURCE_TEXT_2': 'If you add a <strong>human</strong> or <strong>animal</strong> source, you will be asked to complete a survey',
'SAMPLE_SOURCE_TYPE_HUMAN': 'Human',
'SAMPLE_SOURCE_TYPE_ANIMAL': 'Animal',
'SAMPLE_SOURCE_TYPE_ENVIRONMENTAL': 'Environmental',
'SURVEY_HEADER_1': 'Survey',
'SURVEY_TEXT_1': 'If you are taking a human or animal sample, we ask that you complete a survey.',
'SURVEY_TEXT_2': 'The survey will take <strong>30-45 minutes</strong> for a human subject, or <strong>10 minutes</strong> for an animal subject. You <strong>cannot</strong> save in the middle of the survey, so please set aside enough time to complete the entire survey.',
'SURVEY_TEXT_3': 'If you are taking a human sample, the survey includes demographic, lifestyle, medical and diet questions. All survey questions are optional.',
'SURVEY_TEXT_4': 'The diet questions do not require a food diary, but please be prepared to answer questions about your eating habits. A screenshot of the dietary questions is shown below.',
'SAMPLE_STEPS_HEADER_1': 'Before Taking Your Samples',
'SAMPLE_STEPS_TEXT_1': 'These are the steps involved in taking a sample:',
'SAMPLE_STEPS_TEXT_2': '<li>Make sure you have <a href="#" onclick="selectTab(\'source\')">added your sample source and completed the required survey(s)</a></li><li>Remove the sample swabs from the sample tube</li><li>Collect your sample following the guidelines below</li><li>Place sample swabs into the sample tube</li>',
'SAMPLE_STEPS_TEXT_3': 'These sample collection instructions are very important, please read through them <strong>before</strong> beginning to take your sample. Deviations will cause issues with sample processing, sequencing, and data analysis. We cannot guarantee that we will be able to process your sample if the instructions are not followed, and <strong>we cannot offer replacements if instructions were not followed</strong>. Please do not hesitate to ask us questions at <a href="%(sitebase)s/authed/help_request/">%(help_email)s</a>.' % {"help_email": media_locale["HELP_EMAIL"], 'sitebase': media_locale['SITEBASE']},
'SAMPLE_STEPS_HEADER_2': 'Taking Your Samples',
'SAMPLE_STEPS_TEXT_4': 'Once you have removed the sample tube, only handle the sample swab by the red cap.',
'SAMPLE_STEPS_TEXT_5': 'For a <strong>fecal sample</strong>, rub both cotton tips on a fecal specimen (a used piece of bathroom tissue). Collect a small amount of biomass. Maximum collection would be to saturate 1/2 a swab. <strong>More is not better!</strong> The ideal amount of biomass collected is shown below.',
'SAMPLE_STEPS_TEXT_6': 'For an <strong>oral sample</strong>, firmly rub both sides of both cotton tips on the surface of the tongue for 20 seconds. Take great caution not to touch the cheeks, teeth, or lips.',
'SAMPLE_STEPS_TEXT_7': 'For a <strong>skin sample</strong>, firmly rub both sides of both cotton tips over the skin surface being sampled for 20 seconds.',
'SAMPLE_STEPS_TEXT_8': 'For an <strong>other/environmental sample</strong>, firmly rub both sides of both cotton tips over the surface being sampled for 20 seconds.',
'SAMPLE_STEPS_TEXT_9': 'After you have finished taking your sample, return the swabs to the sample tube and push the red cap on firmly.',
'LOG_SAMPLE_HEADER_1': 'Logging Samples',
'LOG_SAMPLE_TEXT_1': 'Please write the sample site, date, and time on the sampling tube.',
'LOG_SAMPLE_TEXT_2': 'After writing the information on the sampling tube, <a href="%(sitebase)s/authed/add_sample_overview/">log the sample</a> in our system.' % {'sitebase': media_locale['SITEBASE']},
'MAILING_HEADER_1': 'Mailing samples',
'MAILING_TEXT_1': 'Once you have added a <a href="#" onclick="selectTab(\'source\')">sample source, completed the relevant survey</a> (if applicable), <a href="#" onclick="selectTab(\'sample\')">taken</a> and <a href="#" onclick="selectTab(\'log\')">logged your samples</a>, you should then mail the samples back to us.',
'MAILING_TEXT_2': 'Wrap the sample tube in absorbent tissue, such as facial tissue or paper towels, and mail it back as soon as possible. The absorbent tissue will help to keep the relative humidity within the package low.',
'MAILING_TEXT_3': 'We also recommend using a reinforced envelope to reduce the chance of losing your sample due to damaged packaging.',
'MAILING_TEXT_4': 'The sooner we receive your sample, the sooner we can get it stored in our -80C freezers and ready for processing!',
'MAILING_TEXT_5': '<strong>Do not refrigerate or freeze the samples</strong> if they cannot be shipped immediately. Store them in a cool dry place such as a cabinet or a closet.',
'DOMESTIC_HEADER_1': 'Domestic Shipping',
'DOMESTIC_TEXT_1': 'Shipping within the US should be less than $1.50, but we recommend taking the sample to the post office to get the proper postage. Getting the postage right on the first try is important since samples that spend a long time in transit will likely not produce the highest quality results.',
'DOMESTIC_TEXT_2': 'This is the shipping address:',
'DOMESTIC_TEXT_3': media_locale['SHIPPING_ADDRESS'],
'INTERNATIONAL_HEADER_1': 'International Shipping',
'INTERNATIONAL_TEXT_1': 'In order to comply with amended federal and IATA regulations, we are requesting that international participants return their sample tubes through FedEx International and that international participants follow the additional safety requirements for shipping human swab samples to the United States. Your airway bill must clearly identify the package as containing "human exempt specimens". The samples will additionally need to be packaged within a secondary containment to ensure that they can safely enter the United States.',
'INTERNATIONAL_TEXT_2': 'For shipment, you will need to use clear tape to secure the sample swabs to the sample tube, then place the sample tube in the provided buff mailing envelope. Then place the buff envelope inside a Tyvek/plastic mailer, <strong>which can be acquired free of charge from FedEx</strong>, prior to FedEx shipment.',
'INTERNATIONAL_TEXT_3': 'If you do not follow these directions the sample will be destroyed by United States Customs at the port of entry into the United States.',
'INTERNATIONAL_HEADER_2': 'Your samples',
'INTERNATIONAL_TEXT_4': '<li>Are considered dried specimens</li><li>Must be shipped via FedEx</li><li>Must have tape sealing the plastic tube that contains the swab</li><li>Must be placed in a buff mailing envelope with the buff envelope placed inside a Tyvek/plastic mailer prior to FedEx shipment</li><li>Must be shipped with an airway bill and must be labeled with the complete address of the sender and complete address of recipient, and with the words "Human exempt sample(s)"</li>',
'RESULTS_HEADER_1': 'Sequencing & Results',
'RESULTS_TEXT_1': 'Once you have added a <a href="#" onclick="selectTab(\'source\')">sample source, completed the relevant survey</a> (if applicable), <a href="#" onclick="selectTab(\'sample\')">taken</a> and <a href="#" onclick="selectTab(\'log\')">logged your samples</a> and you have <a href="#" onclick="selectTab(\'mail\')">mailed the samples back to us</a>, we will then perform sequencing and analysis on your samples.',
'RESULTS_TEXT_2': 'Sequencing and data analysis can take up to 6 months, so please be patient! We will let you know as soon as your samples have been sequenced and analyzed.',
'RESULTS_READY_HEADER_1': 'Your results are ready!',
'RESULTS_READY_TEXT_1': 'One or more of the samples you submitted have been sequenced, and the results are now available online! Currently, we have only processed fecal samples, but we will be processing samples from other body sites soon.',
'RESULTS_READY_TEXT_2': 'To access your available results, hover over "Human Samples" in the menu on the left, hover over your name, then click on your sample to view your results, or click one of the links below. <b>For help interpreting results, <a href="%s/authed/addendum/">click here</a></b>. The following barcodes are ready:' % _SITEBASE,
'RESULTS_READY_TEXT_3': 'You will be able to view your results here on this website once they are available.'
}
_CHANGE_PASS_VERIFY = {
'TITLE': 'Please enter new password',
'NEW_PASSWORD': 'New Password',
'HELP_NEW_PASSWORD': 'The new password you would like to use to log in from now on.',
'CONFIRM_PASSWORD': 'Confirm Password',
'HELP_CONFIRM_PASSWORD': "Repeat your New Password again, exactly as before. We ask you to repeat it here so that you don't accidentally change your password to something you did not intend.",
'BUTTON_TEXT': 'Change Password',
'NO_VALID_CODE': 'Your password change code is not valid. If you wish to change your password please <a href="%(sitebase)s/forgot_password/">start over</a>' % {'sitebase': media_locale['SITEBASE']},
'SUCCESS': 'Your password has been changed',
'NO_EMAIL_1': 'Could not send Email',
'NO_EMAIL_2': 'We attempted to email the message below:',
'NO_EMAIL_3': 'This is a courtesy email to confirm that you have changed your password for your kit with ID %(kitid)s. If you did not request this change, please email us immediately at %(help_email)s.'
}
# helper tuples for the survey questions
_NO_RESPONSE_CHOICE = "Unspecified"
_YES_NO_CHOICES = (_NO_RESPONSE_CHOICE, 'Yes', 'No')
_YES_NO_NOTSURE_CHOICES = (_NO_RESPONSE_CHOICE, 'Yes', 'No', 'Not sure')
_FREQUENCY_MONTH_CHOICES = (_NO_RESPONSE_CHOICE,
'Never',
'Rarely (a few times/month)',
'Occasionally (1-2 times/week)',
'Regularly (3-5 times/week)',
'Daily')
_FREQUENCY_WEEK_CHOICES = (_NO_RESPONSE_CHOICE,
'Never',
'Rarely (less than once/week)',
'Occasionally (1-2 times/week)',
'Regularly (3-5 times/week)',
'Daily')
_DIAGNOSIS_CHOICE = (_NO_RESPONSE_CHOICE,
'I do not have this condition',
'Diagnosed by a medical professional (doctor, physician assistant)',
'Diagnosed by an alternative medicine practitioner',
'Self-diagnosed')
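# Illustrative sketch (not part of the original module): how one of the choice
# tuples above might be turned into HTML <option> elements. The real site
# renders choices through its templates; the helper below is hypothetical.
def _example_render_choices(choices):
    """Return an HTML string with one <option> element per survey choice."""
    return ''.join('<option value="%s">%s</option>' % (c, c) for c in choices)
# e.g. _example_render_choices(_YES_NO_CHOICES)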
_ANIMAL_SURVEY = {
'GENERAL_TITLE': 'General',
'GENERAL_QUESTION_1': 'Animal type?',
'GENERAL_QUESTION_1_CHOICES': (_NO_RESPONSE_CHOICE,
'Dog',
'Cat',
'Small mammal',
'Large mammal',
'Fish',
'Bird',
'Reptile',
'Amphibian',
'Other'),
'GENERAL_QUESTION_2': 'Origin?',
'GENERAL_QUESTION_2_CHOICES': (_NO_RESPONSE_CHOICE,
'Breeder',
'Shelter',
'Home',
'Wild'),
'GENERAL_QUESTION_3': 'Age?',
'GENERAL_QUESTION_3_CHOICES': None,
'GENERAL_QUESTION_4': 'Gender?',
'GENERAL_QUESTION_4_CHOICES': (_NO_RESPONSE_CHOICE,
'Male',
'Female',
'Unknown'),
'GENERAL_QUESTION_5': 'Setting?',
'GENERAL_QUESTION_5_CHOICES': (_NO_RESPONSE_CHOICE,
'Urban',
'Suburban',
'Rural'),
'GENERAL_QUESTION_6': 'Weight category?',
'GENERAL_QUESTION_6_CHOICES': (_NO_RESPONSE_CHOICE,
'Underweight',
'Skinny',
'Normal',
'Chubby',
'Overweight'),
'GENERAL_QUESTION_7': 'Diet classification?',
'GENERAL_QUESTION_7_CHOICES': (_NO_RESPONSE_CHOICE,
'Carnivore',
'Omnivore',
'Herbivore'),
'GENERAL_QUESTION_8': 'Food source?',
'GENERAL_QUESTION_8_CHOICES': (_NO_RESPONSE_CHOICE,
'Pet store food',
'Human food',
'Wild food'),
'GENERAL_QUESTION_9': 'Food type?',
'GENERAL_QUESTION_9_CHOICES': (_NO_RESPONSE_CHOICE,
'dry',
'wet',
'both'),
'GENERAL_QUESTION_10': 'Food special attributes?',
'GENERAL_QUESTION_10_CHOICES': (_NO_RESPONSE_CHOICE,
'Organic',
'Grain free'),
'GENERAL_QUESTION_11': 'Social?',
'GENERAL_QUESTION_11_CHOICES': (_NO_RESPONSE_CHOICE,
'Lives alone with humans',
'Lives alone no/limited humans (shelter)',
'Lives with other animals and humans',
'Lives with other animals/limited humans'),
'GENERAL_QUESTION_12': 'Any pets the current animal lives with?',
'GENERAL_QUESTION_12_CHOICES': None,
'GENERAL_QUESTION_13': 'Add the age of any human that the current animal lives with',
'GENERAL_QUESTION_13_CHOICES': None,
'GENERAL_QUESTION_14': 'Add the gender of any human that the current animal lives with',
'GENERAL_QUESTION_14_CHOICES': (_NO_RESPONSE_CHOICE,
'Male',
'Female',
'Other'),
'GENERAL_QUESTION_15': 'Hours spent outside?',
'GENERAL_QUESTION_15_CHOICES': (_NO_RESPONSE_CHOICE,
'None',
'Less than 2',
'2-4',
'4-8',
'8+'),
'GENERAL_QUESTION_16': 'Toilet water access?',
'GENERAL_QUESTION_16_CHOICES': (_NO_RESPONSE_CHOICE,
'Regular',
'Sometimes',
'Never'),
'GENERAL_QUESTION_17': 'Coprophage?',
'GENERAL_QUESTION_17_CHOICES': (_NO_RESPONSE_CHOICE,
'High',
'Moderate',
'Low',
'Never'),
'SUPPLEMENTAL_COMMENTS': 'Please write anything else about this animal that you think might affect its microorganisms.'
}
_HUMAN_SURVEY_COMPLETED = {
'COMPLETED_HEADER': 'Congratulations!',
'COMPLETED_TEXT': 'You are now an enrolled participant in the %(PROJECT_TITLE)s!' % media_locale,
'AVAILABLE_SURVEYS': 'Below are a few additional surveys that you may be interested in completing. There is no requirement to take these surveys, and your decision does not affect your involvement in the project in any way.',
'SURVEY_ASD': '<h3 style="text-align: center"><a href="%s">ASD-Cohort survey</a></h3><a href="http://www.anl.gov/contributors/jack-gilbert">Dr. Jack Gilbert</a> is exploring the relationship between gut dysbiosis and Autism Spectrum Disorders, and in conjunction with the American Gut Project, we started an ASD-Cohort study. This additional survey contains questions specific to that cohort, but it is open to any participant to take if they so choose.',
'SURVEY_VIOSCREEN': '<h3 style="text-align: center"><a href="%s">Dietary Survey</a></h3>The American Gut Project and its sister projects are very interested in diet. If you\'d like to provide additional detail about your diet, please click the link above to take a detailed diet survey (known as a Food Frequency Questionnaire). This is a validated FFQ, and is the one used by the Mayo Clinic.'
}
_SURVEY_MAIN = {
'TITLE': 'Survey',
'ONCE_YOU_START': 'Once you start this survey, you must complete it. Your answers will <strong>not</strong> be saved unless you complete the entire survey.',
'TIME_COMMITMENT': 'We anticipate that completing the questionnaire online will take no more than <strong>45 minutes</strong>.',
'TYPES_OF_QUESTIONS': 'You will be asked questions about your general personal information (name, age, sex, height, weight, ethnicity, place of birth, and current ZIP or equivalent code). We will ask if you recently moved and where you moved from. We will ask questions about general diet information (including whether you follow a special diet, if you have food allergies, whether you have cultural or religious food restrictions). Other questions address whether you have pets and the type of contact you have with these pets and your relationship to other people in this study. There is a section on health information including a history of allergies/asthma, if you suffer from migraines and if you have a history of irritable bowel disease.',
'YOU_MAY_DECLINE': 'You may decline to answer any question by not selecting an answer.',
'OTHER_SURVEYS': 'Following the questionnaire, you will be presented with a few other focused surveys. As with everything, those surveys are optional but your responses could help improve our understanding of the microbiome.'
}
_NOJS = {
'MESSAGE': 'You have JavaScript disabled, which this site requires in order to function properly. <br/>Please enable JavaScript and reload <a href="http://www.microbio.me/americangut">http://www.microbio.me/americangut</a>.',
'NEED_HELP': 'If you need help enabling JavaScript in your browser, <br/>please email us at <a href="mailto:americangut@gmail.com">americangut@gmail.com</a>'
}
# Actual text locale
text_locale = {
'nojs.html': _NOJS,
'404.html': _404,
'403.html': _403,
'FAQ.html': _FAQ,
'new_participant_overview.html': _NEW_PARTICIPANT_OVERVIEW,
'addendum.html': _ADDENDUM,
'portal.html': _PORTAL,
'db_error.html': _DB_ERROR,
'retrieve_kitid.html': _RETREIVE_KITID,
'add_sample.html': _ADD_SAMPLE,
'error.html': _ERROR,
'forgot_password.html': _FORGOT_PASSWORD,
'help_request.html': _HELP_REQUEST,
'new_participant.html': _NEW_PARTICIPANT,
'international.html': _INTERNATIONAL,
'add_sample_overview.html': _ADD_SAMPLE_OVERVIEW,
'participant_overview.html': _PARTICIPANT_OVERVIEW,
'sample_overview.html': _SAMPLE_OVERVIEW,
'taxa_summary.html': _TAXA_SUMMARY,
'map.html': _MAP,
'human_survey_completed.html': _HUMAN_SURVEY_COMPLETED,
'register_user.html': _REGISTER_USER,
'chage_pass_verify.html': _CHANGE_PASS_VERIFY,
'survey_main.html': _SURVEY_MAIN,
'animal_survey.html': _ANIMAL_SURVEY,
'handlers': _HANDLERS
}
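# Illustrative usage (not part of the original module): a page handler can look
# up its strings in text_locale by template name and key, for example:
#
#     strings = text_locale['portal.html']
#     greeting = strings['GREETING'] % {'user_name': 'Jane'}
#
# Entries containing %-placeholders (such as 'GREETING') are interpolated at
# render time; the name 'Jane' above is only an example value.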
| bsd-3-clause |
yyjiang/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
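# Optional addition (not in the original example): report which percentile of
# features gave the best mean cross-validation score, using only the variables
# computed above.
best_percentile = percentiles[np.argmax(score_means)]
print("Best percentile of features: %d (mean CV score: %.3f)"
      % (best_percentile, max(score_means)))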
| bsd-3-clause |
zihua/scikit-learn | sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
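# Note: rows of X and Y are normalized to sum to 1 so they resemble histograms;
# the chi-squared style kernels exercised below expect non-negative,
# histogram-like input.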
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
decvalts/cartopy | lib/cartopy/mpl/patch.py | 1 | 9280 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Provide shapely geometry <-> matplotlib path support.
See also `Shapely Geometric Objects <see_also_shapely>`_
and `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_.
.. see_also_shapely:
http://toblerity.org/shapely/manual.html#geometric-objects
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib.path
from matplotlib.path import Path
import shapely.geometry as sgeom
def geos_to_path(shape):
"""
Create a list of :class:`matplotlib.path.Path` objects that describe
a shape.
Parameters
----------
shape
A list, tuple or single instance of any of the following
types: :class:`shapely.geometry.point.Point`,
:class:`shapely.geometry.linestring.LineString`,
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.multipoint.MultiPoint`,
:class:`shapely.geometry.multipolygon.MultiPolygon`,
:class:`shapely.geometry.multilinestring.MultiLineString`,
:class:`shapely.geometry.collection.GeometryCollection`,
or any type with a _as_mpl_path() method.
Returns
-------
paths
A list of :class:`matplotlib.path.Path` objects.
"""
if isinstance(shape, (list, tuple)):
paths = []
for shp in shape:
paths.extend(geos_to_path(shp))
return paths
if isinstance(shape, (sgeom.LineString, sgeom.Point)):
return [Path(np.column_stack(shape.xy))]
elif isinstance(shape, sgeom.Polygon):
def poly_codes(poly):
codes = np.ones(len(poly.xy[0])) * Path.LINETO
codes[0] = Path.MOVETO
return codes
if shape.is_empty:
return []
vertices = np.concatenate([np.array(shape.exterior.xy)] +
[np.array(ring.xy) for ring in
shape.interiors], 1).T
codes = np.concatenate([poly_codes(shape.exterior)] +
[poly_codes(ring) for ring in shape.interiors])
return [Path(vertices, codes)]
elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,
sgeom.MultiLineString, sgeom.MultiPoint)):
paths = []
for geom in shape.geoms:
paths.extend(geos_to_path(geom))
return paths
elif hasattr(shape, '_as_mpl_path'):
vertices, codes = shape._as_mpl_path()
return [Path(vertices, codes)]
else:
raise ValueError('Unsupported shape type {}.'.format(type(shape)))
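# Illustrative usage (not part of the original module): converting a simple
# shapely polygon into a matplotlib Path.
#
#     >>> import shapely.geometry as sgeom
#     >>> square = sgeom.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#     >>> [path] = geos_to_path(square)
#     >>> path.vertices.shape    # the exterior ring is closed, hence 5 points
#     (5, 2)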
def path_segments(path, transform=None, remove_nans=False, clip=None,
quantize=False, simplify=False, curves=False,
stroke_width=1.0, snap=False):
"""
Create an array of vertices and a corresponding array of codes from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
kwargs
See :func:`matplotlib.path.iter_segments` for details of the keyword
arguments.
Returns
-------
vertices, codes
A (vertices, codes) tuple, where vertices is a numpy array of
coordinates, and codes is a numpy array of matplotlib path codes.
See :class:`matplotlib.path.Path` for information on the types of
codes and their meanings.
"""
# XXX assigned to avoid a ValueError inside the mpl C code...
a = (transform, # noqa: F841 (flake8 = assigned + unused : see above)
remove_nans, clip, quantize, simplify, curves)
# Series of cleanups and conversions to the path e.g. it
# can convert curved segments to line segments.
vertices, codes = matplotlib.path.cleanup_path(path, transform,
remove_nans, clip,
snap, stroke_width,
simplify, curves)
# Remove the final vertex (with code 0)
return vertices[:-1, :], codes[:-1]
# Matplotlib v1.3+ deprecates the use of matplotlib.path.cleanup_path. Instead
# there is a method on a Path instance to simplify this.
if hasattr(matplotlib.path.Path, 'cleaned'):
_path_segments_doc = path_segments.__doc__
def path_segments(path, **kwargs):
pth = path.cleaned(**kwargs)
return pth.vertices[:-1, :], pth.codes[:-1]
path_segments.__doc__ = _path_segments_doc
def path_to_geos(path, force_ccw=False):
"""
Create a list of Shapely geometric objects from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
force_ccw
Boolean flag determining whether the path can be inverted to enforce
ccw. Defaults to False.
Returns
-------
A list of instances of the following type(s):
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.linestring.LineString` and/or
:class:`shapely.geometry.multilinestring.MultiLineString`.
"""
# Convert path into numpy array of vertices (and associated codes)
path_verts, path_codes = path_segments(path, curves=False)
# Split into subarrays such that each subarray consists of connected
# line segments based on the start of each one being marked by a
# matplotlib MOVETO code.
verts_split_inds = np.where(path_codes == Path.MOVETO)[0]
verts_split = np.split(path_verts, verts_split_inds)
codes_split = np.split(path_codes, verts_split_inds)
# Iterate through the vertices generating a list of
# (external_geom, [internal_polygons]) tuples.
other_result_geoms = []
collection = []
for path_verts, path_codes in zip(verts_split, codes_split):
if len(path_verts) == 0:
continue
# XXX A path can be given which does not end with close poly, in that
# situation, we have to guess?
verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],
axis=1)
if all(verts_same_as_first):
geom = sgeom.Point(path_verts[0, :])
elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:
geom = sgeom.Polygon(path_verts[:-1, :])
elif path_verts.shape[0] > 3 and verts_same_as_first[-1]:
geom = sgeom.Polygon(path_verts)
else:
geom = sgeom.LineString(path_verts)
# If geom is a Polygon and is contained within the last geom in
# collection, add it to its list of internal polygons, otherwise
# simply append it as a new external geom.
if geom.is_empty:
pass
elif (len(collection) > 0 and
isinstance(collection[-1][0], sgeom.Polygon) and
isinstance(geom, sgeom.Polygon) and
collection[-1][0].contains(geom.exterior)):
collection[-1][1].append(geom.exterior)
elif isinstance(geom, sgeom.Point):
other_result_geoms.append(geom)
else:
collection.append((geom, []))
# Convert each (external_geom, [internal_polygons]) pair into a
# shapely Polygon that encapsulates the internal polygons; if the
# external geom is a LineString, leave it alone.
geom_collection = []
for external_geom, internal_polys in collection:
if internal_polys:
# XXX worry about islands within lakes
geom = sgeom.Polygon(external_geom.exterior, internal_polys)
else:
geom = external_geom
# Correctly orientate the polygon (ccw)
if isinstance(geom, sgeom.Polygon):
if force_ccw and not geom.exterior.is_ccw:
geom = sgeom.polygon.orient(geom)
geom_collection.append(geom)
# If the geom_collection only contains LineStrings, combine them
# into a single MultiLineString.
if geom_collection and all(isinstance(geom, sgeom.LineString) for
geom in geom_collection):
geom_collection = [sgeom.MultiLineString(geom_collection)]
# Remove any zero area Polygons
def not_zero_poly(geom):
return ((isinstance(geom, sgeom.Polygon) and not geom._is_empty and
geom.area != 0) or
not isinstance(geom, sgeom.Polygon))
result = list(filter(not_zero_poly, geom_collection))
return result + other_result_geoms
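# Illustrative round trip (not part of the original module): a Path produced by
# geos_to_path() can usually be converted back with path_to_geos(), e.g.
#
#     >>> [path] = geos_to_path(square)   # square as in the earlier sketch
#     >>> path_to_geos(path)              # -> [<shapely Polygon>]
#
# Exact vertex-for-vertex equality is not guaranteed, because path_segments()
# may clean up the path along the way.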
| gpl-3.0 |
xyguo/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 33 | 10515 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| bsd-3-clause |
maxikov/bikedatan | rebalancing_analytics.py | 1 | 1291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import data_utils
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
def main():
empty_times, full_times = data_utils.load_empty_full_station_times_of_day(debug = True)
n, bins, patches = plt.hist(empty_times, 50, normed=0, facecolor='blue', alpha=0.5)
n, bins, patches = plt.hist(full_times, 50, normed=0, facecolor='green', alpha=0.5)
plt.xlabel("Hour of the day")
plt.ylabel("Observations")
plt.title("Observations when stations are empty (blue) or full (green)")
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
Fireblend/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set missing (zero-weight) entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
murali-munna/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All three penalties are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
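# Added note for clarity (not part of the original example): for x = w0 in
# [0, 1], l1() and l2() return the non-negative w1 on the unit contours
# |w0| + |w1| = 1 and w0**2 + w1**2 = 1 respectively, while el() returns the
# matching point on an elastic-net contour that blends the two penalties,
# with the mix controlled by alpha (hence the singularity at alpha = 0.5).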
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501 # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
JonasHarnau/apc | apc/tests/test_fit_table.py | 1 | 4632 | import unittest
import pandas as pd
import numpy as np
import apc
class TestFitTable(unittest.TestCase):
def test_TA_odp(self):
model = apc.Model()
model.data_from_df(apc.loss_TA(), data_format='CL')
model.fit(family='od_poisson_response', predictor='AC')
model.fit_table()
dev_table_Ad = model.fit_table(reference_predictor='Ad',
attach_to_self=False)
self.assertTrue(
np.allclose(
model.deviance_table.sum().values,
np.array(
[6.14205640e+07, 4.26000000e+02, 0.00000000e+00, 4.42934379e+07,
1.02000000e+02, 6.42698087e+01, 8.77350747e-01]
)
)
)
self.assertTrue(
np.allclose(
dev_table_Ad.sum().values,
np.array(
[3.34371797e+07, 2.48000000e+02, 0.00000000e+00, 2.20883978e+07,
2.80000000e+01, 5.10659264e+01, 5.28199268e-02]
)
)
)
def test_Belgian_ln_rates(self):
model = apc.Model()
model.data_from_df(**apc.Belgian_lung_cancer())
model.fit(family='log_normal_rates', predictor='APC')
model.fit_table()
self.assertTrue(
np.allclose(
model.deviance_table.sum().values,
np.array(
[694.2842826, 508.000000 , 1367.09717145, 238.000000,
349.62681465, 2.73393206, 998.2842826]
)
)
)
def test_Belgian_bin_dose_response(self):
data = apc.Belgian_lung_cancer()
dose = (data['response']/data['rate'] * 10**5).astype(int)
model = apc.Model()
model.data_from_df(data['response'], dose=dose, data_format='AP')
model.fit_table('binomial_dose_response', 'APC')
self.assertTrue(np.allclose(
model.deviance_table.astype(float).values,
np.array([
[2.02272942e+01, 1.80000000e+01, 3.20169615e-01, np.nan, np.nan, np.nan],
[2.55616207e+01, 3.00000000e+01, 6.97305582e-01, 5.33432652e+00,
1.20000000e+01, 9.45870225e-01],
[2.14563493e+01, 2.00000000e+01, 3.70723402e-01, 1.22905512e+00,
2.00000000e+00, 5.40896377e-01],
[9.91929917e+01, 2.70000000e+01, 3.49109630e-10, 7.89656975e+01,
9.00000000e+00, 2.59348099e-13],
[2.65878986e+01, 3.20000000e+01, 7.37036572e-01, 6.36060439e+00,
1.40000000e+01, 9.56568004e-01],
[2.53472759e+02, 3.90000000e+01, 0.00000000e+00, 2.33245465e+02,
2.10000000e+01, 0.00000000e+00],
[1.00677524e+02, 2.90000000e+01, 7.61992691e-10, 8.04502302e+01,
1.10000000e+01, 1.20758958e-12],
[8.55939082e+01, 3.30000000e+01, 1.48750103e-06, 6.53666140e+01,
1.50000000e+01, 2.94677404e-08],
[6.39083556e+03, 4.00000000e+01, 0.00000000e+00, 6.37060827e+03,
2.20000000e+01, 0.00000000e+00],
[1.21719783e+03, 3.00000000e+01, 0.00000000e+00, 1.19697053e+03,
1.20000000e+01, 0.00000000e+00],
[2.54429395e+02, 4.10000000e+01, 0.00000000e+00, 2.34202101e+02,
2.30000000e+01, 0.00000000e+00],
[3.08059993e+02, 4.20000000e+01, 0.00000000e+00, 2.87832698e+02,
2.40000000e+01, 0.00000000e+00],
[6.39139748e+03, 4.20000000e+01, 0.00000000e+00, 6.37117019e+03,
2.40000000e+01, 0.00000000e+00],
[1.61214822e+03, 4.20000000e+01, 0.00000000e+00, 1.59192092e+03,
2.40000000e+01, 0.00000000e+00],
[6.50047766e+03, 4.30000000e+01, 0.00000000e+00, 6.48025037e+03,
2.50000000e+01, 0.00000000e+00]
]),
equal_nan=True)
)
def test_Belgian_poisson_dose_response(self):
model = apc.Model()
model.data_from_df(**apc.Belgian_lung_cancer())
model.fit_table('poisson_dose_response', 'APC')
self.assertTrue(
np.allclose(
model.deviance_table.astype(float).sum().values,
np.array(
[2.33052840e+04, 5.08000000e+02, 2.12588574e+00, 2.30019096e+04,
2.38000000e+02, 2.44351741e+00]
)
)
)
if __name__ == '__main__':
unittest.main() | gpl-3.0 |
spallavolu/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_clusters * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that passing an AgglomerativeClustering instance as n_clusters
# gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a radius no larger than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
arank/mxnet | example/ssd/detect/detector.py | 16 | 6261 | from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.testdb import TestDB
from dataset.iterator import DetIter
class Detector(object):
"""
SSD detector which holds a detection network and wraps the detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=None, context=self.ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
self.mod.set_params(args, auxs)
self.mean_pixels = mean_pixels
def detect(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
if not isinstance(det_iter, mx.io.PrefetchingIter):
det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
detections = self.mod.predict(det_iter).asnumpy()
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(
num_images, time_elapsed))
result = []
for i in range(detections.shape[0]):
det = detections[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
return result
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
"""
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
is_train=False)
return self.detect(test_iter, show_timer)
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import matplotlib.pyplot as plt
import random
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > thresh:
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(dets[i, 2] * width)
ymin = int(dets[i, 3] * height)
xmax = int(dets[i, 4] * width)
ymax = int(dets[i, 5] * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show()
def detect_and_visualize(self, im_list, root_dir=None, extension=None,
classes=[], thresh=0.6, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
"""
import cv2
dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
if not isinstance(im_list, list):
im_list = [im_list]
assert len(dets) == len(im_list)
for k, det in enumerate(dets):
img = cv2.imread(im_list[k])
img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
self.visualize_detection(img, det, classes, thresh)
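# Minimal usage sketch (hypothetical model prefix, image path and class list;
# added for illustration, not part of the original module):
# import mxnet as mx
# det = Detector(symbol=None, model_prefix='model/ssd_300', epoch=0,
#                data_shape=300, mean_pixels=(123, 117, 104), ctx=mx.cpu())
# det.detect_and_visualize(['street.jpg'], classes=['person', 'car'],
#                          thresh=0.5, show_timer=True)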
| apache-2.0 |
katholt/Kleborate | scripts/kleborate_to_microreact.py | 1 | 12287 | #!/usr/bin/env python3
"""
Copyright 2018 Kat Holt
Copyright 2018 Ryan Wick (rrwick@gmail.com)
https://github.com/katholt/Kleborate/
This file is part of Kleborate. Kleborate is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Kleborate is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Kleborate. If
not, see <http://www.gnu.org/licenses/>.
"""
import sys
import argparse
import collections
import pandas as pd
from Bio import Phylo
def get_arguments():
parser = argparse.ArgumentParser(description='A script for converting Kleborate output into a '
'format compatible with Microreact')
required_args = parser.add_argument_group('Required arguments')
required_args.add_argument('--kleborate_in', type=str, required=True,
help='Kleborate tab-delimited results file')
required_args.add_argument('--tree_in', type=str, required=True,
help='Phylogenetic tree')
required_args.add_argument('--csv_out', type=str, required=True,
help='Kleborate results in Microreact format')
required_args.add_argument('--tree_out', type=str, required=True,
help='Tree in Microreact format')
return parser.parse_args()
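# Example invocation sketch (hypothetical file names, added for illustration):
#   python kleborate_to_microreact.py \
#       --kleborate_in Kleborate_results.txt --tree_in core_genome.tree \
#       --csv_out microreact.csv --tree_out microreact.nwk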
def main():
args = get_arguments()
name_subs = name_substitution(args.kleborate_in)
check_for_unique_names(name_subs)
save_tree_with_new_names(args.tree_in, args.tree_out, name_subs)
autocolour_columns = get_autocolour_columns(args.kleborate_in)
csv_lines = []
with open(args.kleborate_in, 'rt') as kleborate_results:
original_header, new_header = None, None
for line in kleborate_results:
line = line.rstrip('\n')
if original_header is None:
original_header = line.split('\t')
new_header = get_new_header(original_header, autocolour_columns)
line_parts = new_header
else:
line_parts = get_data(line, name_subs, original_header, new_header)
csv_lines.append((','.join(line_parts)))
print()
print('Writing Microreact table to: {}'.format(args.csv_out))
with open(args.csv_out, 'wt') as output_csv:
for line in csv_lines:
output_csv.write(line)
output_csv.write('\n')
print()
def get_autocolour_columns(kleborate_in):
autocolour_columns = []
table = pd.read_table(kleborate_in)
for col_name in ['species', 'ST', 'YbST', 'CbST', 'AbST', 'SmST', 'wzi', 'K_locus',
'O_locus']:
try:
if len(set(table[col_name])) > 1:
autocolour_columns.append(col_name)
except KeyError:
pass
print()
print('Using "__autocolour" on the following columns:')
print(' ', ', '.join(autocolour_columns))
return set(autocolour_columns)
def get_new_header(original_header, autocolour_columns):
original_header[0] = 'id' # Change 'strain' to 'id' for Microreact.
for autocolour_column in autocolour_columns:
i = find_column_index(original_header, autocolour_column)
original_header[i] = autocolour_column + '__autocolour'
header = list(original_header)
for col in ['virulence_score', 'resistance_score', 'num_resistance_classes',
'num_resistance_genes', 'Yersiniabactin', 'Colibactin', 'Aerobactin',
'Salmochelin', 'rmpA', 'rmpA2']:
header.insert(find_column_index(header, col) + 1, col + '__colour')
for res in ['AGly_acquired', 'Col_acquired', 'Fcyn_acquired', 'Flq_acquired', 'Gly_acquired', 'MLS_acquired', 'Phe_acquired', 'Rif_acquired', 'Sul_acquired', 'Tet_acquired',
'Tgc_acquired', 'Tmt_acquired', 'Bla_acquired', 'Bla_inhR_acquired', 'Bla_ESBL_acquired', 'Bla_ESBL_inhR_acquired', 'Bla_Carb_acquired', 'Bla_chr', 'SHV_mutations',
'Col_mutations', 'Flq_mutations', 'Omp_mutations']:
header.insert(find_column_index(header, res) + 1, res + '__colour')
header.remove(res)
return header
def get_data(line, name_subs, original_header, new_header):
line = line.replace(',', ';')
line_parts = line.split('\t')
line_parts[0] = name_subs[line_parts[0]]
original_data = dict(zip(original_header, line_parts))
new_data = {h: '' for h in new_header}
for label, value in original_data.items():
new_data[label] = value
vir_score = int(original_data['virulence_score'])
res_score = int(original_data['resistance_score'])
res_classes = int(original_data['num_resistance_classes'])
res_genes = int(original_data['num_resistance_genes'])
new_data['virulence_score__colour'] = get_vir_score_colour(vir_score)
new_data['resistance_score__colour'] = get_res_score_colour(res_score)
new_data['num_resistance_classes__colour'] = get_res_classes_colour(res_classes)
new_data['num_resistance_genes__colour'] = get_res_genes_colour(res_genes)
# new_data['Yersiniabactin__colour'] = get_vir_lineage_colour(original_data['Yersiniabactin'])
new_data['Colibactin__colour'] = get_vir_lineage_colour(original_data['Colibactin'])
new_data['Aerobactin__colour'] = get_vir_lineage_colour(original_data['Aerobactin'])
new_data['Salmochelin__colour'] = get_vir_lineage_colour(original_data['Salmochelin'])
new_data['rmpA__colour'] = get_rmpA_colour(original_data['rmpA'])
new_data['rmpA2__colour'] = get_rmpA2_colour(original_data['rmpA2'])
for res_class in ['AGly_acquired', 'Col_acquired', 'Fcyn_acquired', 'Flq_acquired', 'Gly_acquired', 'MLS_acquired', 'Phe_acquired', 'Rif_acquired', 'Sul_acquired', 'Tet_acquired',
'Tgc_acquired', 'Tmt_acquired', 'Bla_acquired', 'Bla_inhR_acquired', 'Bla_ESBL_acquired', 'Bla_ESBL_inhR_acquired', 'Bla_Carb_acquired', 'Bla_chr', 'SHV_mutations',
'Col_mutations', 'Flq_mutations', 'Omp_mutations']:
new_data[res_class + '__colour'] = get_res_class_colour(original_data[res_class])
return [new_data[h] for h in new_header]
def name_substitution(kleborate_in):
name_subs = {}
with open(kleborate_in, 'rt') as kleborate_results:
header = None
for line in kleborate_results:
if header is None:
header = line.split('\t')
if header[0] != 'strain':
sys.exit('Error: first column is not "strain" - is this Kleborate output?')
else:
line_parts = line.split('\t')
if len(line_parts) != len(header):
sys.exit('Error: inconsistent number of columns')
old_name = line_parts[0]
if old_name in name_subs:
sys.exit('Error: duplicate sample ID: ' + old_name)
new_name = old_name.replace('.', '_')
new_name = new_name.replace(',', '_')
new_name = new_name.replace("'", '_')
new_name = new_name.replace('"', '_')
name_subs[old_name] = new_name
return name_subs
def check_for_unique_names(name_subs):
names = list(name_subs.values())
duplicate_names = [item for item, count in collections.Counter(names).items() if count > 1]
if duplicate_names:
sys.exit('Error: duplicate sample IDs: ' + ', '.join(duplicate_names))
def save_tree_with_new_names(tree_in, tree_out, name_subs):
print()
print('Writing Microreact tree to: {}'.format(tree_out))
tree_format = None
for try_tree_format in ['newick', 'nexus', 'nexml', 'phyloxml', 'cdao']:
try:
Phylo.read(tree_in, try_tree_format)
tree_format = try_tree_format
break
except ValueError:
pass
if tree_format is None:
sys.exit('Error: could not read input tree')
tree = Phylo.read(tree_in, tree_format)
for node in tree.get_terminals():
name = str(node.name)
try:
node.name = name_subs[name]
except KeyError:
sys.exit('Error: sample name in tree not in Kleborate data: ' + name)
Phylo.write(tree, tree_out, 'newick')
def scale_num(start, end, progress):
return int(round(start * (1.0 - progress) + end * progress))
def colour_range(start, end, count):
start, end = start.lower(), end.lower()
if start.startswith('#'):
start = start[1:]
if end.startswith('#'):
end = end[1:]
start_r, start_g, start_b = int(start[0:2], 16), int(start[2:4], 16), int(start[4:6], 16)
end_r, end_g, end_b = int(end[0:2], 16), int(end[2:4], 16), int(end[4:6], 16)
colours = []
for i in range(count):
progress = i / (count - 1)
r, g, b = scale_num(start_r, end_r, progress), scale_num(start_g, end_g, progress), \
scale_num(start_b, end_b, progress)
hex_colour = '"#' + ('0x%X' % r)[2:] + ('0x%X' % g)[2:] + ('0x%X' % b)[2:] + '"'
colours.append(hex_colour)
return colours
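# Illustration (added note): colour_range('#FCBBA1', '#BE413D', 3) returns
# ['"#FCBBA1"', '"#DD7E6F"', '"#BE413D"'] -- each RGB channel is interpolated
# linearly between the two end colours, and values are quoted for the CSV.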
def find_column_index(header, col_name):
try:
return header.index(col_name)
except ValueError:
sys.exit('Error: could not find ' + col_name + ' column in Kleborate')
def get_vir_score_colour(vir_score):
try:
return ['#DEEBF7', '#9ECAE1', '#6BAED6', '#4292C6', '#2171B5', '#08306B'][vir_score]
except IndexError:
return '#BFBFBF'
def get_res_score_colour(res_score):
try:
return ['#FCBBA1', '#FC9272', '#FB6A4A', '#BE413D'][res_score]
except IndexError:
return '#BFBFBF'
def get_res_classes_colour(res_classes):
try:
return colour_range('#FCBBA1', '#BE413D', 11)[res_classes]
except IndexError:
return '#BE413D'
def get_res_genes_colour(res_genes):
try:
return colour_range('#FCBBA1', '#BE413D', 21)[res_genes]
except IndexError:
return '#BE413D'
def get_species_colour(species):
try:
return {'Klebsiella pneumoniae': '#875F9A',
'Klebsiella variicola subsp. variicola': '#8CBDB2',
'Klebsiella quasivariicola': '#F0B663',
'Klebsiella quasipneumoniae subsp. quasipneumoniae': '#ED6060',
'Klebsiella quasipneumoniae subsp. similipneumoniae': '#EDA483'}[species]
except KeyError:
return '#BFBFBF'
def get_vir_lineage_colour(vir_lineage):
vir_lineage_colours = {'ybt 1': '#B27F91', 'ybt 2': '#CDA12C', 'ybt 3': '#56A354',
'ybt 4': '#F28FA2', 'ybt 5': '#DB7723', 'ybt 6': '#93539D',
'ybt 7': '#3A85A8', 'ybt 8': '#7B75CC', 'ybt 9': '#D9C5EF',
'ybt 10': '#449D72', 'ybt 11': '#EBD930', 'ybt 12': '#6AA3C6',
'ybt 13': '#A39F93', 'ybt 14': '#93539D', 'ybt 15': '#EDC59A',
'ybt 16': '#840639', 'ybt 17': '#E25065', 'clb 1': '#99BBE0',
'clb 2A': '#5972AF', 'clb 2B': '#242F69', 'clb 3': '#242F69',
'iro 1': '#B6D5EF', 'iro 2': '#DEC4E8', 'iro 3': '#E29771',
'iro 4': '#A4A4EA', 'iro 5': '#E0AAAA', 'iuc 1': '#B6D5EF',
'iuc 2': '#DEC4E8', 'iuc 2A': '#D8ABDD', 'iuc 3': '#C3EADB',
'iuc 4': '#9ACCBC', 'iuc 5': '#E0AAAA'}
vir_lineage = vir_lineage.split(';')[0]
if vir_lineage in vir_lineage_colours:
return vir_lineage_colours[vir_lineage]
elif vir_lineage == '-':
return '#FFFFFF'
else:
return '#BFBFBF'
def get_rmpA_colour(rmpA):
return '#FFFFFF' if rmpA == '-' else '#08306B'
def get_rmpA2_colour(rmpA2):
return '#FFFFFF' if rmpA2 == '-' else '#08306B'
def get_res_class_colour(res_class):
return '#FFFFFF' if res_class == '-' else '#BE413D'
if __name__ == '__main__':
main()
| gpl-3.0 |
kernc/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques: :ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian process:
two Student's t distributions with a low number of degrees of freedom (top
left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/series/test_replace.py | 8 | 7896 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from .common import TestData
class TestSeriesReplace(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((pd.isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((pd.isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
ser = pd.Series(self.ts.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
# GH 5797
ser = pd.Series(pd.date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp('20120101')
result = ser.replace({pd.Timestamp('20130103'):
pd.Timestamp('20120101')})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp('20130103'),
pd.Timestamp('20120101'))
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
s.replace([1, 2, 3], inplace=True)
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
with tm.assertRaises(ValueError):
s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
s = pd.Series(np.arange(5), dtype='int64')
def check_replace(to_rep, val, expected):
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
tm.assert_series_equal(expected, r)
tm.assert_series_equal(expected, sc)
# should NOT upcast to float
e = pd.Series([0, 1, 2, 3, 4])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, 'a'])
tr, v = [3, 4], [3.5, 'a']
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])
tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
check_replace(tr, v, e)
# casts to float
e = pd.Series([0, 1, 2, 3.5, 1])
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
result = dr.astype(object).replace(
[dr[0], dr[1], dr[2]], [1.0, 2, 'a'])
expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, '2u')
expected = pd.Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((pd.isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((pd.isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
| mit |
lucaskotres/ElipseExamples | EPM/Plugins/ML/ML.py | 1 | 1274 | # coding=utf-8
#Machine Learning Toolbox
import Plugins as ep
import tools
import pandas as pd
MSG_MSGBOXTITLE = 'EPM - Message'
ERR_NOPENSELECTED = 'Please execute the dataset, then select one pen'
@ep.DatasetFunctionPlugin('Linear Regression', 1)
def linear_regression():
if len(ep.EpmDatasetPens.SelectedPens) != 1:
ep.showMsgBox(MSG_MSGBOXTITLE, ERR_NOPENSELECTED, 'Warning')
return 0
epm_tag = ep.EpmDatasetPens.SelectedPens[0].Values
print(epm_tag)
@ep.DatasetFunctionPlugin('function 1', 2)
def function1():
pass
@ep.DatasetFunctionPlugin('function 2', 3)
def function2():
pass
@ep.DatasetFunctionPlugin('function 3', 4)
def function3():
pass
@ep.DatasetFunctionPlugin('function 4', 5)
def function4():
pass
@ep.DatasetFunctionPlugin('Run Test', 6)
def run_test():
if len(ep.EpmDatasetPens.SelectedPens) != 1:
ep.showMsgBox(MSG_MSGBOXTITLE, ERR_NOPENSELECTED, 'Warning')
return 0
epm_tag = ep.EpmDatasetPens.SelectedPens[0].Values
#convert numpy to pandas dataframe
print(tools.np2pd(epm_tag))
#plot line
tools.plot(epm_tag['Value'])
#print OLS Summary
#tools.print_OLS(tools.np2pd(epm_tag))
tools.test_stationarity(tools.np2pd(epm_tag))
| mit |
Shashank00/testdata | hsi_utils.py | 1 | 29348 | #!/usr/bin/python
#
# Copyright (c) 2013-2015, Zhouhan LIN
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pdb
import time
import sys
import scipy.io as sio
import numpy
import theano
import pylab as pl
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
# the following color map is for generating wholeimage classification figures.
cmap = numpy.asarray( [[0, 0, 0],
[0, 205, 0],
[127, 255, 0],
[46, 139, 87],
[0, 139, 0],
[160, 82, 45],
[0, 255, 255],
[255, 255, 255],
[216, 191, 216],
[255, 0, 0],
[139, 0, 0],
[0, 0, 255],
[255, 255, 0],
[238, 154, 0],
[85, 26, 139],
[255, 127, 80]], dtype='int32')
def result_analysis(prediction, train_truth, valid_truth, test_truth,
verbose=False):
assert prediction.shape == test_truth.shape
print ("Detailed information in each category:")
print (" Number of Samples")
print ("Class No. TRAIN VALID TEST RightCount RightRate")
for i in xrange(test_truth.min(), test_truth.max()+1):
right_prediction = ( (test_truth-prediction) == 0 )
right_count = numpy.sum(((test_truth==i) * right_prediction)*1)
print ("%d\t\t%d\t%d\t%d\t%d\t%f" % \
(i,
numpy.sum((train_truth==i)*1),
numpy.sum((valid_truth==i)*1),
numpy.sum((test_truth==i)*1),
right_count,
right_count * 1.0 / numpy.sum((test_truth==i)*1)
))
total_right_count = numpy.sum(right_prediction*1)
print ("Overall\t\t%d\t%d\t%d\t%d\t%f" % \
(train_truth.size,
valid_truth.size,
test_truth.size,
total_right_count,
total_right_count * 1.0 / test_truth.size
))
cm = confusion_matrix(test_truth, prediction)
pr_a = cm.trace()*1.0 / test_truth.size
pr_e = ((cm.sum(axis=0)*1.0/test_truth.size) * \
(cm.sum(axis=1)*1.0/test_truth.size)).sum()
k = (pr_a - pr_e) / (1 - pr_e)
print ("kappa index of agreement: %f" % k)
print ("confusion matrix:")
print (cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
if verbose:
pl.show()
else:
filename = 'conf_mtx_' + str(time.time()) + '.png'
pl.savefig(filename)
#-------------------------------------------------------------------------------
"""
The scale_to_unit_interval() and tile_raster_images() functions are from the
Deep Learning Tutorial repo:
https://github.com/lisa-lab/DeepLearningTutorials
Below are the corresponding licence.
LICENSE
=======
Copyright (c) 2010--2015, Deep Learning Tutorials Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Theano nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
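# Minimal usage sketch (hypothetical shapes, added for illustration only):
# flat_imgs = numpy.random.rand(100, 28 * 28)  # 100 flattened 28x28 images
# tiled = tile_raster_images(X=flat_imgs, img_shape=(28, 28),
#                            tile_shape=(10, 10), tile_spacing=(1, 1))
# pl.imshow(tiled, cmap='gray'); pl.axis('off'); pl.savefig('tiles.png')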
#-------------------------------------------------------------------------------
def PCA_tramsform_img(img=None, n_principle=3):
"""
This function transforms an HSI by 1-D PCA. PCA is fitted on the whole data
and is conducted on the spectral dimension, rendering the image from size
length * width * dim to length * width * n_principle.
Parameters:
img: initial unregularized HSI.
n_principle: Target number of principal components we want.
Return:
reg_img: Regularized, transformed image.
WARNNING: RELATIVE ENERGY BETWEEN PRINCIPLE COMPONENTS CHANGED IN THIS
IMPLEMENTATION. YOU MAY NEED TO ADD PENALTY MULTIPLIERS IN THE HIGHER NETWORKS
TO REIMBURSE IT.
"""
length = img.shape[0]
width = img.shape[1]
dim = img.shape[2]
# reshape img, HORIZONTALLY strench the img, without changing the spectral dim.
reshaped_img = numpy.asarray(img.reshape(length*width, dim),
dtype=theano.config.floatX)
pca = PCA(n_components=n_principle)
pca_img = pca.fit_transform(reshaped_img)
# Regularization: Think about energy of each principles here.
reg_img = scale_to_unit_interval(ndar=pca_img, eps=1e-8)
reg_img = numpy.asarray(reg_img.reshape(length, width, n_principle),
dtype=theano.config.floatX)
energy_dist = pca.explained_variance_ratio_
residual = 1 - numpy.sum(energy_dist[0: n_principle])
return reg_img, energy_dist, residual
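# Usage sketch (hypothetical cube size, added for illustration only):
# cube = numpy.random.rand(145, 145, 176)  # length x width x spectral bands
# reg_img, energy_dist, residual = PCA_tramsform_img(img=cube, n_principle=3)
# reg_img.shape is (145, 145, 3); energy_dist holds the explained-variance
# ratios of the 3 retained components and residual = 1 - their sum.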
def T_pca_constructor(hsi_img=None, gnd_img=None, n_principle=3, window_size=1,
flag='supervised', merge=False):
"""
This function constructs the spectral and spatial facade for each training
pixel.
Spectral data returned are simply spectra.
spatial data returned are the former n_principle PCs of a neibor region
around each extracted pixel. Size of the neibor region is determined by
window_size. And all the values for a pixel are flattened to 1-D size. So
data_spatial is finally a 2-D numpy.array.
All the returned data are regularized to [0, 1]. Set window_size=1 to
get pure spectral returnings.
Parameters:
hsi_img=None: 3-D numpy.ndarray, dtype=float, storing initial
hyperspectral image data.
gnd_img=None: 2-D numpy.ndarray, dtype=int, containing tags for pixels.
The size is the same as the hsi_img size, but with only
1 band.
n_principle: Target number of principal components we want to consider
in the spatial information.
window_size: Determines the scale of spatial information incorporated.
Must be odd.
flag: For 'unsupervised', all possible pixels except marginals
are processed. For 'supervised', only pixels with
non-zero tags are processed.
Return:
data_spectral: 2-D numpy.array, sized (sample number) * (band number).
Consists of regularized spectra for all extracted pixels.
data_spatial: 2-D numpy.array, sized (sample number) * (window_size^2 *
n_principle). Consists of the first n_principle PCs of a
neighbor region around each extracted pixel. Size of the
neighbor region is determined by window_size.
gndtruth: 1-D numpy.array, sized (sample number) * 1. Truth value
for each extracted pixel.
extracted_pixel_ind: 1-D numpy.array of flattened pixel indices, indicating
which pixels are selected.
"""
# PCA transformation
pca_img, _, _ = PCA_tramsform_img(img=hsi_img, n_principle=n_principle)
# Regularization
hsi_img = scale_to_unit_interval(ndar=hsi_img, eps=1e-8)
length = hsi_img.shape[0]
width = hsi_img.shape[1]
dim = hsi_img.shape[2]
# reshape img, HORIZONTALLY stretch the img, without changing the spectral dim.
reshaped_img = numpy.asarray(hsi_img.reshape(length*width, dim),
dtype=theano.config.floatX)
reshaped_gnd = gnd_img.reshape(gnd_img.size)
# mask ensures marginal pixels eliminated, according to window_size
threshold = (window_size-1) / 2
if window_size >= 1 and window_size < width-1 and window_size < length-1:
mask_false = numpy.array([False, ] * width)
mask_true = numpy.hstack((numpy.array([False, ] * threshold, dtype='bool'),
numpy.array([True, ] * (width-2*threshold)),
numpy.array([False, ] * threshold, dtype='bool')))
mask = numpy.vstack((numpy.tile(mask_false, [threshold, 1]),
numpy.tile(mask_true, [length-2*threshold, 1]),
numpy.tile(mask_false, [threshold, 1])))
reshaped_mask = mask.reshape(mask.size)
else:
print >> sys.stderr, ('window_size error. choose 0 < window_size < width-1')
# construct groundtruth, and determine which pixel to process
if flag == 'supervised':
extracted_pixel_ind = (reshaped_gnd > 0) * reshaped_mask
gndtruth = reshaped_gnd[extracted_pixel_ind]
extracted_pixel_ind = numpy.arange(reshaped_gnd.size)[extracted_pixel_ind]
elif flag == 'unsupervised':
extracted_pixel_ind = numpy.arange(reshaped_gnd.size)[reshaped_mask]
gndtruth = numpy.array([], dtype='int')
else:
print >> sys.stderr, ('\"flag\" parameter error. ' +
'What type of learning are you doing?')
return
# construct data_spectral
data_spectral = reshaped_img[extracted_pixel_ind, :]
# construct data_spatial
if window_size == 1:
data_spatial = numpy.array([])
else:
data_spatial = numpy.zeros([extracted_pixel_ind.size,
window_size * window_size * n_principle],
dtype=theano.config.floatX)
i = 0
for ipixel in extracted_pixel_ind:
ipixel_h = ipixel % width
ipixel_v = ipixel / width
data_spatial[i, :] = \
pca_img[ipixel_v-threshold : ipixel_v+threshold+1,
ipixel_h-threshold : ipixel_h+threshold+1, :].reshape(
window_size*window_size*n_principle)
i += 1
# if we want to merge data, merge it
if merge:
data_spectral = numpy.hstack((data_spectral, data_spatial))
return data_spectral, data_spatial, gndtruth, extracted_pixel_ind
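# Usage sketch (hypothetical inputs, added for illustration only):
# spec, spat, gnd, idx = T_pca_constructor(hsi_img=img, gnd_img=gnd_img,
#                                          n_principle=3, window_size=7,
#                                          flag='supervised')
# spec has one row per labelled non-marginal pixel (bands as columns) and
# spat holds the flattened 7 x 7 x 3 PCA neighborhood of the same pixels.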
def train_valid_test(data, ratio=[6, 2, 2], batch_size=50, random_state=None):
"""
This function splits data into three parts, according to the "ratio" parameter,
a list giving the training, validation and test proportions.
data: a list containing:
1. A 2-D numpy.array object, with each patterns listed in
ROWs. Input data dimension MUST be larger than 1.
2. A 1-D numpy.array object, tags for each pattern.
'0' indicates that the tag for the corrresponding
pattern is unknown.
ratio: A list having 3 elements, indicating ratio of training,
validating and testing data ratios respectively.
batch_size: ensures the returned training samples have a count that
is divisible by batch_size (the validation and test sets
are padded likewise).
NOTE: batch_size cannot be larger than the smallest of the
training, validation and test sets!
random_state: If we give the same random state and the same ratio on the
same data, the function will yield the same split for each
function call.
return:
[train_data_x, train_data_y]:
[valid_data_x, valid_data_y]:
[test_data_x , test_data_y ]:
Lists containing 2 numpy.array object, first for data and
second for truth. They are for training, validate and test
respectively. All the tags are integers in the range
[0, data[1].max()-1].
split_mask
"""
rand_num_generator = numpy.random.RandomState(random_state)
#---------------------------split dataset-----------------------------------
random_mask = rand_num_generator.random_integers(1, sum(ratio), data[0].shape[0])
split_mask = numpy.array(['tests', ] * data[0].shape[0])
split_mask[random_mask <= ratio[0]] = 'train'
split_mask[(random_mask <= ratio[1]+ratio[0]) * (random_mask > ratio[0])] = 'valid'
train_data_x = data[0][split_mask == 'train', :]
train_data_y = data[1][split_mask == 'train']-1
valid_data_x = data[0][split_mask == 'valid', :]
valid_data_y = data[1][split_mask == 'valid']-1
test_data_x = data[0][split_mask == 'tests', :]
test_data_y = data[1][split_mask == 'tests']-1
# tackle the batch size mismatch problem
mis_match = train_data_x.shape[0] % batch_size
if mis_match != 0:
mis_match = batch_size - mis_match
train_data_x = numpy.vstack((train_data_x, train_data_x[0:mis_match, :]))
train_data_y = numpy.hstack((train_data_y, train_data_y[0:mis_match]))
mis_match = valid_data_x.shape[0] % batch_size
if mis_match != 0:
mis_match = batch_size - mis_match
valid_data_x = numpy.vstack((valid_data_x, valid_data_x[0:mis_match, :]))
valid_data_y = numpy.hstack((valid_data_y, valid_data_y[0:mis_match]))
mis_match = test_data_x.shape[0] % batch_size
if mis_match != 0:
mis_match = batch_size - mis_match
test_data_x = numpy.vstack((test_data_x, test_data_x[0:mis_match, :]))
test_data_y = numpy.hstack((test_data_y, test_data_y[0:mis_match]))
return [train_data_x, train_data_y], \
[valid_data_x, valid_data_y], \
[test_data_x , test_data_y], split_mask
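# Usage sketch (added for illustration; the ratio/batch_size values are
# arbitrary):
# [tr_x, tr_y], [va_x, va_y], [te_x, te_y], mask = train_valid_test(
#     data=[data_spectral, gndtruth], ratio=[6, 2, 2], batch_size=50,
#     random_state=123)
# Each returned set is padded so its row count is divisible by batch_size,
# and labels are shifted to the range [0, n_classes - 1].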
def prepare_data(hsi_img=None, gnd_img=None, window_size=7, n_principle=3,
batch_size=50, merge=False, ratio=[6, 2, 2]):
"""
Process the data from file path to splited train-valid-test sets; Binded in
dataset_spectral and dataset_spatial respectively.
Parameters
----------
hsi_img=None: 3-D numpy.ndarray, dtype=float, storing initial
hyperspectral image data.
gnd_img=None: 2-D numpy.ndarray, dtype=int, containing tags for pixels.
The size is the same as the hsi_img size, but with only
1 band.
window_size: Size of the spatial window. Pass an integer 1 if no spatial
information is needed.
n_principle: Number of principal components to incorporate when
extracting spatial info.
merge: If merge==True, the returned dataset_spectral has
dataset_spatial stacked in the tail of it; else if
merge==False, the returned dataset_spectral and
dataset_spatial will have spectral and spatial information
only, respectively.
Return
------
dataset_spectral:
dataset_spatial:
extracted_pixel_ind:
split_mask:
"""
data_spectral, data_spatial, gndtruth, extracted_pixel_ind = \
T_pca_constructor(hsi_img=hsi_img, gnd_img=gnd_img, n_principle=n_principle,
window_size=window_size, flag='supervised')
################ separate train, valid and test spatial data ###############
[train_spatial_x, train_y], [valid_spatial_x, valid_y], [test_spatial_x, test_y], split_mask = \
train_valid_test(data=[data_spatial, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spatial_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spatial_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spatial_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spatial = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
############### separate train, valid and test spectral data ###############
[train_spectral_x, train_y], [valid_spectral_x, valid_y], [test_spectral_x, test_y], split_mask = \
train_valid_test(data=[data_spectral, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# if we want to merge data, merge it
if merge:
train_spectral_x = numpy.hstack((train_spectral_x, train_spatial_x))
valid_spectral_x = numpy.hstack((valid_spectral_x, valid_spatial_x))
test_spectral_x = numpy.hstack((test_spectral_x, test_spatial_x))
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spectral_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spectral_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spectral_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spectral = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return dataset_spectral, dataset_spatial, extracted_pixel_ind, split_mask
if __name__ == '__main__':
""" Sample usage. """
print ('... Testing function result_analysis')
import random
from sklearn import svm, datasets
# import some data to play with
print ('... loading Iris data')
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_samples, n_features = X.shape
p = range(n_samples)
random.seed(0)
random.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# Run classifier
print ('... classifying')
classifier = svm.SVC(kernel='linear')
y_ = classifier.fit(X[:half], y[:half]).predict(X[half:])
result_analysis(y_, y[:half], numpy.asarray([]), y[half:])
# load .mat files
print ('... loading KSC data')
hsi_file = u'/home/hantek/data/hsi_data/kennedy/Kennedy_denoise.mat'
gnd_file = u'/home/hantek/data/hsi_data/kennedy/Kennedy_groundtruth.mat'
data = sio.loadmat(hsi_file)
img = numpy.float_(data['Kennedy176'])
data = sio.loadmat(gnd_file)
gnd_img = data['Kennedy_groundtruth']
gnd_img = gnd_img.astype(numpy.int32)
print ('... splitting train-valid-test sets')
dataset_spectral, dataset_spatial, extracted_pixel_ind, split_mask = \
prepare_data(hsi_img=img, gnd_img=gnd_img, window_size=7, n_principle=3, batch_size=50, merge=True)
if raw_input('Splitting finished. Do you want to check the data (Y/n)? ') == 'Y':
spectral_train_x = dataset_spectral[0][0].get_value()
spectral_train_y = dataset_spectral[0][1].get_value()
spectral_valid_x = dataset_spectral[1][0].get_value()
spectral_valid_y = dataset_spectral[1][1].get_value()
spectral_test_x = dataset_spectral[2][0].get_value()
spectral_test_y = dataset_spectral[2][1].get_value()
spatial_train_x = dataset_spatial[0][0].get_value()
spatial_train_y = dataset_spatial[0][1].get_value()
spatial_valid_x = dataset_spatial[1][0].get_value()
spatial_valid_y = dataset_spatial[1][1].get_value()
spatial_test_x = dataset_spatial[2][0].get_value()
spatial_test_y = dataset_spatial[2][1].get_value()
print ('shape of:')
print ('spectral_train_x: \t%s' % (spectral_train_x.shape,))
print ('spectral_train_y: \t%s' % (spectral_train_y.shape,))
print ('spectral_valid_x: \t%s' % (spectral_valid_x.shape,))
print ('spectral_valid_y: \t%s' % (spectral_valid_y.shape,))
print ('spectral_test_x: \t%s' % (spectral_test_x.shape,))
print ('spectral_test_y: \t%s' % (spectral_test_y.shape,))
print ('spatial_train_x: \t%s' % (spatial_train_x.shape,))
print ('spatial_train_y: \t%s' % (spatial_train_y.shape,))
print ('spatial_valid_x: \t%s' % (spatial_valid_x.shape,))
print ('spatial_valid_y: \t%s' % (spatial_valid_y.shape,))
print ('spatial_test_x: \t%s' % (spatial_test_x.shape,))
print ('spatial_test_y: \t%s' % (spatial_test_y.shape,))
print ('total tagged pixel number: %d' % extracted_pixel_ind.shape[0])
print ('split_mask shape: %s' % (split_mask.shape,))
print ('... checking tags in spatial and spectral data')
trainset_err = numpy.sum((spectral_train_y-spatial_train_y) ** 2)
validset_err = numpy.sum((spectral_valid_y-spatial_valid_y) ** 2)
testset_err = numpy.sum((spectral_test_y-spatial_test_y) ** 2)
if testset_err + validset_err + trainset_err == 0:
print ('Checking test PASSED.')
else:
print ('Checking test FAILED.')
if raw_input('Do you want to save results to data.mat (Y/n)? ') == 'Y':
print ('... saving datasets')
sio.savemat('data.mat', {
'spectral_train_x': dataset_spectral[0][0].get_value(),
'spectral_train_y': dataset_spectral[0][1].get_value(),
'spectral_valid_x': dataset_spectral[1][0].get_value(),
'spectral_valid_y': dataset_spectral[1][1].get_value(),
'spectral_test_x': dataset_spectral[2][0].get_value(),
'spectral_test_y': dataset_spectral[2][1].get_value(),
'spatial_train_x': dataset_spatial[0][0].get_value(),
'spatial_train_y': dataset_spatial[0][1].get_value(),
'spatial_valid_x': dataset_spatial[1][0].get_value(),
'spatial_valid_y': dataset_spatial[1][1].get_value(),
'spatial_test_x': dataset_spatial[2][0].get_value(),
'spatial_test_y': dataset_spatial[2][1].get_value(),
'extracted_pixel_ind': extracted_pixel_ind,
'split_mask': split_mask})
print ('Done.')
| mit |
endlessm/chromium-browser | tools/perf/cli_tools/pinpoint_cli/histograms_df_test.py | 5 | 4115 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from cli_tools.pinpoint_cli import histograms_df
from core.external_modules import pandas
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import date_range
from tracing.value.diagnostics import generic_set
def TestHistogram(name, units, values, **kwargs):
def DiagnosticValue(value):
if isinstance(value, (int, long)):
return date_range.DateRange(value)
elif isinstance(value, list):
return generic_set.GenericSet(value)
else:
raise NotImplementedError(type(value))
hist = histogram.Histogram(name, units)
hist.diagnostics.update(
(key, DiagnosticValue(value)) for key, value in kwargs.iteritems())
for value in values:
hist.AddSample(value)
return hist
@unittest.skipIf(pandas is None, 'pandas not available')
class TestHistogramsDf(unittest.TestCase):
def testIterRows(self):
run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
# Second run on same device ten minutes later.
run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
hists = histogram_set.HistogramSet([
TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'],
traceUrls=['http://url/to/trace1'], **run1),
TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'],
traceUrls=['http://url/to/trace2'], **run1),
TestHistogram('memory', 'sizeInBytes', [512], stories=['story2'],
traceUrls=['http://url/to/trace3'], **run2),
])
expected = [
('startup', 'ms', 10.0, 2.0, 3, 'run1', 'system_health',
'story1', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace1'),
('memory', 'sizeInBytes', 256.0, 0.0, 1, 'run1', 'system_health',
'story2', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace2'),
('memory', 'sizeInBytes', 512.0, 0.0, 1, 'run2', 'system_health',
'story2', '2009-02-13 23:41:30', 'device1', 'http://url/to/trace3'),
]
self.assertItemsEqual(histograms_df.IterRows(hists.AsDicts()), expected)
def testDataFrame(self):
run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
# Second run on same device ten minutes later.
run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
hists = histogram_set.HistogramSet([
TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'],
traceUrls=['http://url/to/trace1'], **run1),
TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'],
traceUrls=['http://url/to/trace2'], **run1),
TestHistogram('memory', 'sizeInBytes', [384], stories=['story2'],
traceUrls=['http://url/to/trace3'], **run2),
])
df = histograms_df.DataFrame(hists.AsDicts())
# Poke at the data frame and check a few known facts about our fake data:
# It has 3 histograms.
self.assertEqual(len(df), 3)
# The benchmark has two stories.
self.assertItemsEqual(df['story'].unique(), ['story1', 'story2'])
# We recorded three traces.
self.assertEqual(len(df['trace_url'].unique()), 3)
# All benchmarks ran on the same device.
self.assertEqual(len(df['device_id'].unique()), 1)
# There is a memory regression between runs 1 and 2.
memory = df.set_index(['name', 'run_label']).loc['memory']['mean']
self.assertEqual(memory['run2'] - memory['run1'], 128.0)
# Ten minutes passed between the two benchmark runs.
self.assertEqual(df['benchmark_start'].max() - df['benchmark_start'].min(),
pandas.Timedelta('10 minutes'))
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
viswimmer1/PythonGenerator | data/python_files/30585407/simple_car_test.py | 1 | 5170 | import os, sys
# U_bar[1,t-1] = -0.005
up_path = os.path.abspath('..')
sys.path.append(up_path)
import numpy as np
import matplotlib.pyplot as plt
from sim_env import SimEnv2D, Beacon, CircularObstacle, RectangularObstacle
from robots import SimpleCar, LocalizerBot
from sensors import BeaconSensor, FOVSensor
from math import pi
from utils import mat2tuple
import random
from math import log
from numpy.random import multivariate_normal as mvn
from kalman_filter import ekf_update
from covar import cov2vec, vec2cov
from optimize import scp_solver_beliefs
from scipy.io import loadmat
import matplotlib.animation as animation
colors = ['b', 'g', 'r', 'c', 'm', 'y']
# Set up environment #args
beacons=[Beacon(np.array([0.2,0.2])),
Beacon(np.array([1.2, 0.5])),
Beacon(np.array([0.2, 0.8]))]
#obstacles = [RectangularObstacle(np.array([[0.75, 0.2], [0.75, 0.4], [0.85, 0.4], [0.85,0.2]], float).T),\
# RectangularObstacle(np.array([[0.5,0.85], [1.15,0.85], [1.15,0.6], [0.5,0.6]], float).T)]
obstacles=[]
s = SimEnv2D(bounds=[-0.1, 1.5, -0.1, 1], beacons=beacons, obstacles=obstacles)
ball = np.array([1.4, 0.30])
x0 = np.array([0, 0.5, 0, 0])
car = SimpleCar(x0)
car.attach_sensor(BeaconSensor(decay_coeff=25), lambda x: x[0:2])
localizer = LocalizerBot(car,ball)
x0 = np.mat(localizer.x)
localizer.attach_sensor(FOVSensor(localizer.x, fov_angle=2*pi, decay_coeff=25), lambda x: localizer.fov_state(x))
s.add_robot(localizer)
# Number of timesteps
T = 30 #arg
# Dynamics and measurement noise
num_states = localizer.NX
num_ctrls = localizer.NU
num_measure = len(beacons)+1+1 #arg/make part of robot observe
Q = np.mat(np.diag([1e-5]*num_states)) #arg
Q[2,2] = 1e-8 # Gets out of hand if noise in theta or phi
Q[3,3] = 1e-8 # Can also add theta/phi to measurement like Sameep #TODO?
Q[4,4] = 1e-10 # Can also add theta/phi to measurement like Sameep #TODO?
Q[5,5] = 1e-10 # Can also add theta/phi to measurement like Sameep #TODO?
R = np.mat(np.diag([0.005]*num_measure)) #arg
R[4,4] = 5e-3
#R[3,3] = 1e-9
# Sample noise
dynamics_noise = mvn([0]*num_states, Q, T-1).T*0 #FIXME
measurement_noise = mvn([0]*num_measure, R, T-1).T*0 #FIXME
# Setup for EKF
mus = np.mat(np.zeros((num_states,T)))
mus[:,0] = np.mat(x0).T
Sigmas = np.zeros((Q.shape[0], Q.shape[1],T))
Sigmas[:,:,0] = np.mat(np.diag([0.0002]*num_states)) #arg
Sigmas[2,2,0] = 0.0000001
# Generate nominal belief trajectory
X_bar = np.mat(np.zeros((localizer.NX, T))) #arg
X_bar[:,0] = np.mat(x0).T
U_bar = np.ones((localizer.NU, T-1))*0.35
for t in xrange(1,T):
U_bar[1,t-1] = -0.005
#print U_bar
for t in xrange(1,T):
X_bar[:,t] = np.mat(localizer.dynamics(X_bar[:,t-1], U_bar[:, t-1])) +\
np.mat(dynamics_noise[:,t-1]).T
mus[:,t], Sigmas[:,:,t] = ekf_update(localizer.dynamics,
lambda x: localizer.observe(s, x=x),
Q, R, mus[:,t-1], Sigmas[:,:,t-1],
U_bar[:,t-1], None) #NOTE No obs
# Plot nominal trajectory with covariance ellipses
ax = plt.gca()
s.draw(ax=ax)
localizer.draw_trajectory(mat2tuple(X_bar.T), mus=X_bar, Sigmas=Sigmas[0:2,0:2,:], color='yellow')
localizer.draw_trajectory([], mus=X_bar[4:6,0:1], Sigmas=Sigmas[4:6,4:6,0:1], color='yellow')
localizer.draw_trajectory([], mus=X_bar[4:6,T-2:T-1], Sigmas=Sigmas[4:6,4:6,T-2:T-1], color='yellow')
#for t in range(0,T):
# localizer.mark_fov(X_bar[:,t], s, [-1, 1, -1, 1], color=colors[t % len(colors)])
#plt.show()
#stop
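# Nominal belief trajectory: each column of Bel_bar stacks the nominal state X_bar[:, t]
# on top of the vectorised covariance cov2vec(Sigmas[:, :, t]).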
Bel_bar = np.mat(np.zeros((localizer.NB, T)))
for t in xrange(T):
Bel_bar[:,t] = np.vstack((X_bar[:,t], cov2vec(Sigmas[:,:,t])))
'''
fig = plt.gcf()
s.draw_goal_state(Bel_bar[:,-1])
car.draw_trail(mat2tuple(Bel_bar[0:4,:].T))
ims = car.get_animation_artists(Bel_bar)
im_ani = animation.ArtistAnimation(fig, ims, interval=100,
blit=True)
plt.show()
stop
'''
# Apply SCP
rho_bel = 0.1
rho_u = 0.05
N_iter = 1
goal_bel = np.copy(Bel_bar[:,-1])
goal_bel[0:2] = np.mat(ball).T
goal_bel[localizer.NX:] = 0
opt_bels, opt_ctrls, opt_vals = scp_solver_beliefs(s, Bel_bar.copy(), U_bar,\
Q, R, rho_bel, rho_u, goal_bel, N_iter, localizer.NX, method='shooting')
opt_mus = np.mat(np.zeros((localizer.NX, T)))
opt_mus[:,0] = X_bar[:,0]
opt_X = opt_mus.copy()
opt_Sigmas = np.zeros((Q.shape[0], Q.shape[1],T))
opt_Sigmas[:,:,0] = Sigmas[:,:,0]
opt_ctrls = np.mat(opt_ctrls)
for t in xrange(1,T):
opt_X[:,t] = localizer.dynamics(opt_X[:,t-1], opt_ctrls[:,t-1]);
opt_mus[:,t], opt_Sigmas[:,:,t] = ekf_update(localizer.dynamics,
lambda x: localizer.observe(s, x=x),
Q, R, opt_mus[:,t-1], opt_Sigmas[:,:,t-1], opt_ctrls[:,t-1], None)
print goal_bel.T
print opt_X[:,T-1].T
#ax = plt.subplot(122)
#s.draw(ax=ax)
localizer.draw_trajectory(mat2tuple(opt_X.T), mus=opt_mus, Sigmas=opt_Sigmas[0:2,0:2,:], color='green')
localizer.draw_trajectory([], mus=opt_mus[4:6,0:1], Sigmas=opt_Sigmas[4:6,4:6,0:1], color='green')
localizer.draw_trajectory([], mus=opt_mus[4:6,T-2:T-1], Sigmas=opt_Sigmas[4:6,4:6,T-2:T-1], color='green')
plt.show()
| gpl-2.0 |
Adai0808/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/manifold/tests/test_isomap.py | 31 | 3991 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
qiqi/fds | apps/charles_cylinder3D_Lyapunov/draw_Lyapunov/drawCLV.py | 1 | 4385 | # This file reads the checkpoint files, computes CLVs, call charles.exe for 0 step,
# and generates the flow field solution for the state variables: rho, rhoE, rhoU
from __future__ import division
import os
import sys
import time
import shutil
import string
import tempfile
import argparse
import subprocess
from multiprocessing import Manager
from numpy import *
from charles import *
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import *
rcParams.update({'axes.labelsize':'xx-large'})
rcParams.update({'xtick.labelsize':'xx-large'})
rcParams.update({'ytick.labelsize':'xx-large'})
rcParams.update({'legend.fontsize':'xx-large'})
rc('font', family='sans-serif')
sys.path.append("/scratch/niangxiu/fds_4CLV_finer_reso")
from fds import *
from fds.checkpoint import *
from fds.cti_restart_io import *
from fds.compute import run_compute
sys.setrecursionlimit(12000)
M_MODES = array([0,7,16,39])
total_MODES = 40
K_SEGMENTS = (400,)
# K_SEGMENTS = range(350, 451)
MPI_NP = 1
MY_PATH = os.path.abspath('/scratch/niangxiu/fds_4CLV_finer_reso/apps')
BASE_PATH = os.path.join(MY_PATH, 'charles')
CLV_PATH = os.path.join(MY_PATH, 'CLV')
if os.path.exists(CLV_PATH):
shutil.rmtree(CLV_PATH)
os.mkdir(CLV_PATH)
RESULT_PATH = []
for j in M_MODES:
RESULT_PATH.append(os.path.join(CLV_PATH, 'CLV'+str(j)))
os.mkdir(RESULT_PATH[-1])
REF_PATH = os.path.join(MY_PATH, 'ref')
REF_DATA_FILE = os.path.join(REF_PATH, 'initial.les')
REF_SUPP_FILE = os.path.join(REF_PATH, 'charles.in')
CHARLES_BIN = os.path.join(REF_PATH, 'charles.exe')
checkpoint = load_last_checkpoint(BASE_PATH, total_MODES)
assert verify_checkpoint(checkpoint)
C = checkpoint.lss.lyapunov_covariant_vectors()
# someone evilly rolled the axis in the lyapunov_covariant_vectors function
C = rollaxis(C,2)
C = rollaxis(C,2) # now the shape is [K_SEGMENTS, M_MODES, M_MODES]
I = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 9, 12, 13, 14, 15, 18, 17, 16, 22, 21, 19, 20, 23, 24, 26, 25, 31, 27, 28, 30, 35, 29, 37, 33, 38, 36, 34, 32, 39]
I = array(I)
print('C.shape = ', C.shape)
def les2vtu(work_path, solut_path, j):
# run charles for 0 steps
outfile = os.path.join(work_path, 'out')
with open(outfile, 'w', 8) as f:
subprocess.call(['mpiexec', '-n', str(MPI_NP), CHARLES_BIN],
cwd=work_path, stdout=f, stderr=f)
# copy the results file and delete the working folder
result_file = os.path.join(solut_path, 'z0_plane.000000.vtu')
shutil.copy(result_file, os.path.join(RESULT_PATH[j], 'z0_plane_seg.'+str(i_segment)+'.vtu'))
shutil.rmtree(work_path)
# draw CLV field
for i_segment in K_SEGMENTS:
print('i_segment = ', i_segment)
checkpoint = load_checkpoint(os.path.join(BASE_PATH, 'm'+str(total_MODES)+'_segment'+str(i_segment)))
assert verify_checkpoint(checkpoint)
u0, V, v, lss, G_lss, g_lss, J_hist, G_dil, g_dil = checkpoint
manager = Manager()
interprocess = (manager.Lock(), manager.dict())
run_compute([V], spawn_compute_job=None, interprocess=interprocess)
V = V.field
print(V.shape)
# construct CLV at this segment
CLV = dot(V.T, C[i_segment, :, :])
CLV = CLV.T
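# Each row of CLV is now one covariant Lyapunov vector expressed in state space,
# i.e. a linear combination of the stored basis vectors in V with this segment's
# coefficients from C.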
# plot CLV
for j, j_mode in enumerate(I[M_MODES]):
run_id = 'CLV'+str(j_mode)+'_seg'+str(i_segment)
print('runid:', run_id)
work_path = os.path.join(CLV_PATH, run_id)
os.mkdir(work_path)
solut_path = os.path.join(work_path, 'SOLUT_2')
os.mkdir(solut_path)
initial_data_file = os.path.join(work_path, 'initial.les')
shutil.copy(REF_DATA_FILE, initial_data_file)
shutil.copy(REF_SUPP_FILE, work_path)
print(CLV[j_mode].shape)
save_compressible_les_normalized(initial_data_file, make_data(CLV[j_mode]), verbose=False)
les2vtu(work_path, solut_path, j)
# plot flow field
run_id = 'primal'+'_seg'+str(i_segment)
print('runid:', run_id)
work_path = os.path.join(CLV_PATH, run_id)
os.mkdir(work_path)
solut_path = os.path.join(work_path, 'SOLUT_2')
os.mkdir(solut_path)
initial_data_file = os.path.join(work_path, 'initial.les')
shutil.copy(REF_DATA_FILE, initial_data_file)
shutil.copy(REF_SUPP_FILE, work_path)
# print('u0 shape: ', u0.field.shape)
save_compressible_les(initial_data_file, make_data(u0.field), verbose=False)
les2vtu(work_path, solut_path, j)
| gpl-3.0 |
OLC-LOC-Bioinformatics/geneSipprV2 | sipprverse/validation/genesippr_validation.py | 2 | 46148 | #!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import GenObject, make_path, MetadataObject, relative_symlink, \
run_subprocess, SetupLogging
from genewrappers.biotools import bbtools
from argparse import ArgumentParser
from Bio import SeqIO
from glob import glob
from time import time
import logging
import psutil
import json
import os
__author__ = 'adamkoziol'
class ReadPrep(object):
def main(self):
self.strains()
self.sequence_prep()
self.assembly_length()
self.simulate_reads()
self.read_length_adjust('simulated')
self.link_reads('simulated')
self.read_quality_trim()
self.sample_reads()
self.read_length_adjust('sampled')
self.link_reads('sampled')
self.run_genesippr()
# self.parse_genesippr()
# self.run_cowbat()
def strains(self):
"""
Create a dictionary of SEQID: OLNID pairs from the supplied strains.csv file
"""
with open(os.path.join(self.path, 'strains.csv')) as strains:
next(strains)
for line in strains:
oln, seqid = line.split(',')
self.straindict[oln] = seqid.rstrip()
self.strainset.add(oln)
logging.debug(oln)
if self.debug:
break
def sequence_prep(self):
"""
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Relative symlink the original FASTA file to the appropriate subdirectory
"""
# Create a sorted list of all the FASTA files in the sequence path
strains = sorted(glob(os.path.join(self.fastapath, '*.fa*')))
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the sample name to be the file name of the sequence by removing the path and file extension
sample_name = os.path.splitext(os.path.basename(sample))[0]
if sample_name in self.strainset:
# Extract the OLNID from the dictionary using the SEQID
samplename = self.straindict[sample_name]
# samplename = sample_name
# Set and create the output directory
outputdir = os.path.join(self.path, samplename)
make_path(outputdir)
# Set the name of the JSON file
json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
if not os.path.isfile(json_metadata):
# Create the name and output directory attributes
metadata.name = samplename
metadata.seqid = sample_name
metadata.outputdir = outputdir
metadata.jsonfile = json_metadata
# Set the name of the FASTA file to use in the analyses
metadata.bestassemblyfile = os.path.join(metadata.outputdir,
'{name}.fasta'.format(name=metadata.name))
# Symlink the original file to the output directory
relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
# Associate the corresponding FASTQ files with the assembly
metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
'{name}*.gz'.format(name=metadata.name))))
metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
# Write the object to file
self.write_json(metadata)
else:
metadata = self.read_json(json_metadata)
# Add the metadata object to the list of objects
self.metadata.append(metadata)
@staticmethod
def write_json(metadata):
"""
Write the metadata object to file
:param metadata: Metadata object
"""
# Open the metadata file to write
with open(metadata.jsonfile, 'w') as metadatafile:
# Write the json dump of the object dump to the metadata file
json.dump(metadata.dump(), metadatafile, sort_keys=True, indent=4, separators=(',', ': '))
@staticmethod
def read_json(json_metadata):
"""
Read the metadata object from file
:param json_metadata: Path and file name of JSON-formatted metadata object file
:return: metadata object
"""
# Load the metadata object from the file
with open(json_metadata) as metadatareport:
jsondata = json.load(metadatareport)
# Create the metadata objects
metadata = MetadataObject()
# Initialise the metadata categories as GenObjects created using the appropriate key
for attr in jsondata:
if not isinstance(jsondata[attr], dict):
setattr(metadata, attr, jsondata[attr])
else:
setattr(metadata, attr, GenObject(jsondata[attr]))
return metadata
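# Illustrative round trip sketched from the two helpers above (not called anywhere
# in the pipeline): write_json serialises sample.dump() to sample.jsonfile, and
# read_json rebuilds an equivalent object, restoring nested dictionaries as GenObjects.
#   ReadPrep.write_json(sample)
#   restored = ReadPrep.read_json(sample.jsonfile)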
def assembly_length(self):
"""
Use SeqIO.parse to extract the total number of bases in each assembly file
"""
for sample in self.metadata:
# Only determine the assembly length if is has not been previously calculated
if not GenObject.isattr(sample, 'assembly_length'):
# Create the assembly_length attribute, and set it to 0
sample.assembly_length = 0
for record in SeqIO.parse(sample.bestassemblyfile, 'fasta'):
# Update the assembly_length attribute with the length of the current contig
sample.assembly_length += len(record.seq)
# Write the updated object to file
self.write_json(sample)
def simulate_reads(self):
"""
Use the PacBio assembly FASTA files to generate simulated reads of appropriate forward and reverse lengths
at different depths of sequencing using randomreads.sh from the bbtools suite
"""
logging.info('Read simulation')
for sample in self.metadata:
# Create the simulated_reads GenObject
sample.simulated_reads = GenObject()
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.simulated_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.simulated_reads[depth].depth = depth
sample.simulated_reads[depth].depth_dir = os.path.join(sample.outputdir, 'simulated', depth)
# Create the output directory
make_path(sample.simulated_reads[depth].depth_dir)
# Iterate through all the desired forward and reverse read pair lengths
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.simulated_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.simulated_reads[depth][read_pair].outputdir = \
os.path.join(sample.simulated_reads[depth].depth_dir, read_pair)
make_path(sample.simulated_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.simulated_reads[depth][read_pair].forward_reads = GenObject()
sample.simulated_reads[depth][read_pair].reverse_reads = GenObject()
# Extract the forward and reverse reads lengths from the read_pair variable
sample.simulated_reads[depth][read_pair].forward_reads.length, \
sample.simulated_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
# Set the name of the forward reads - include the depth and read length information
sample.simulated_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Create the trimmed output directory attribute
sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir \
= os.path.join(sample.simulated_reads[depth][read_pair].outputdir,
'simulated_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.simulated_reads[depth][read_pair].reverse_reads.trimmed_simulated_fastq = \
os.path.join(sample.simulated_reads[depth][read_pair].simulated_trimmed_outputdir,
'{name}_simulated_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Calculate the number of reads required for the forward and reverse reads to yield the
# desired coverage depth e.g. a 5Mbp genome at 20X coverage needs 100Mbp in reads. With
# 50bp forward reads and 150bp reverse reads, the forward proportion is 50 / (150 + 50)
# = 0.25 (and the reverse is 0.75). The forward total is 25Mbp (75Mbp reverse), so the
# number of reads required = 25Mbp / 50bp = 500000 read pairs (the same count for the
# reverse reads, as they are longer)
sample.simulated_reads[depth][read_pair].num_reads = \
int(sample.assembly_length *
int(depth) *
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) /
(int(sample.simulated_reads[depth][read_pair].forward_reads.length) +
int(sample.simulated_reads[depth][read_pair].reverse_reads.length)
)
) /
int(sample.simulated_reads[depth][read_pair].forward_reads.length)
)
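# Illustrative sanity check of the expression above (assumed numbers, not real data):
# assembly_length = 4,000,000 bp, depth = 30, forward = 100 bp, reverse = 100 bp
# num_reads = 4e6 * 30 * (100 / (100 + 100)) / 100 = 600,000 read pairs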
logging.info(
'Simulating {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.simulated_reads[depth][read_pair].forward_reads.length,
rl=sample.simulated_reads[depth][read_pair].reverse_reads.length))
# If the reverse reads are set to 0, supply different parameters to randomreads
if sample.simulated_reads[depth][read_pair].reverse_reads.length != '0':
# Ensure that both the simulated reads, and the trimmed simulated reads files don't
# exist before simulating the reads
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq) and \
not os.path.isfile(
sample.simulated_reads[depth][read_pair].forward_reads.trimmed_simulated_fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't',
'Xmx': self.mem}
)
else:
try:
forward_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.forward_reads.fastq)
except FileNotFoundError:
forward_size = 0
try:
reverse_size = os.path.getsize(sample.simulated_reads[depth][read_pair]
.reverse_reads.fastq)
except FileNotFoundError:
reverse_size = 0
if forward_size <= 100 or reverse_size <= 100:
try:
os.remove(sample.simulated_reads[depth][read_pair].forward_reads.fastq)
except FileNotFoundError:
pass
try:
os.remove(sample.simulated_reads[depth][read_pair].reverse_reads.fastq)
except FileNotFoundError:
pass
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools \
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].reverse_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=True,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
else:
if not os.path.isfile(sample.simulated_reads[depth][read_pair].forward_reads.fastq):
# Use the randomreads method in the OLCTools bbtools wrapper to simulate the reads
out, \
err, \
sample.simulated_reads[depth][read_pair].forward_reads.simulate_call = bbtools\
.randomreads(reference=sample.bestassemblyfile,
length=sample.simulated_reads[depth][read_pair].forward_reads.length,
reads=sample.simulated_reads[depth][read_pair].num_reads,
out_fastq=sample.simulated_reads[depth][read_pair].forward_reads.fastq,
paired=False,
returncmd=True,
**{'ziplevel': '9',
'illuminanames': 't'}
)
# Update the JSON file
self.write_json(sample)
def read_length_adjust(self, analysistype):
"""
Trim the reads to the correct length using reformat.sh
:param analysistype: current analysis type. Will be either 'simulated' or 'sampled'
"""
logging.info('Trimming {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
logging.info(
'Trimming forward {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].forward_reads.length))
# Create the output path if necessary
make_path(os.path.dirname(sample[read_type][depth][read_pair].forward_reads[fastq_type]))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].forward_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].forward_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].forward_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].forward_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].forward_reads.fastq)
# except FileNotFoundError:
# pass
else:
# If the files do not need to be trimmed, create a symlink to the original file
relative_symlink(sample[read_type][depth][read_pair].forward_reads.fastq,
os.path.dirname(sample[read_type][depth][read_pair].
forward_reads[fastq_type]),
os.path.basename(sample[read_type][depth][read_pair].
forward_reads[fastq_type])
)
# Same as above, but for the reverse reads
logging.info(
'Trimming reverse {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].reverse_reads.length))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].reverse_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].reverse_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].reverse_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].reverse_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
})
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].reverse_reads.fastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample)
def read_quality_trim(self):
"""
Perform quality trim, and toss reads below appropriate thresholds
"""
logging.info('Quality trim')
for sample in self.metadata:
sample.sampled_reads = GenObject()
sample.sampled_reads.outputdir = os.path.join(sample.outputdir, 'sampled')
sample.sampled_reads.trimmed_dir = os.path.join(sample.sampled_reads.outputdir, 'qualitytrimmed_reads')
make_path(sample.sampled_reads.trimmed_dir)
for depth in self.read_depths:
# Create the depth GenObject
setattr(sample.sampled_reads, depth, GenObject())
# Set the depth and output directory attributes for the depth GenObject
sample.sampled_reads[depth].depth = depth
sample.sampled_reads[depth].depth_dir = os.path.join(sample.sampled_reads.outputdir, depth)
# Create the output directory
make_path(sample.sampled_reads[depth].depth_dir)
for read_pair in self.read_lengths:
# Create the read_pair GenObject within the depth GenObject
setattr(sample.sampled_reads[depth], read_pair, GenObject())
# Set and create the output directory
sample.sampled_reads[depth][read_pair].outputdir = \
os.path.join(sample.sampled_reads[depth].depth_dir, read_pair)
make_path(sample.sampled_reads[depth][read_pair].outputdir)
# Create both forward_reads and reverse_reads sub-GenObjects
sample.sampled_reads[depth][read_pair].forward_reads = GenObject()
sample.sampled_reads[depth][read_pair].reverse_reads = GenObject()
sample.sampled_reads[depth][read_pair].trimmed_dir = \
os.path.join(sample.sampled_reads.trimmed_dir,
read_pair)
make_path(sample.sampled_reads[depth][read_pair].trimmed_dir)
# Extract the forward and reverse reads lengths from the read_pair variable
sample.sampled_reads[depth][read_pair].forward_reads.length, \
sample.sampled_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
logging.info('Performing quality trimming on reads from sample {name} at depth {depth} '
'for minimum read length {forward}'
.format(name=sample.name,
depth=depth,
forward=sample.sampled_reads[depth][read_pair].forward_reads.length))
# Set the attributes for the trimmed forward and reverse reads to use for subsampling
sample.sampled_reads[depth][read_pair].trimmed_forwardfastq = \
os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
'{name}_{length}_R1.fastq.gz'
.format(name=sample.name,
length=sample.sampled_reads[depth][read_pair].forward_reads.length))
sample.sampled_reads[depth][read_pair].trimmed_reversefastq = \
os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
'{name}_{length}_R2.fastq.gz'
.format(name=sample.name,
length=sample.sampled_reads[depth][read_pair].forward_reads.length))
# Create the trimmed output directory attribute
sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir \
= os.path.join(sample.sampled_reads[depth][read_pair].outputdir,
'sampled_trimmed')
# Set the name of the forward trimmed reads - include the depth and read length information
# This is set now, as the untrimmed files will be removed, and a check is necessary
sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
'{name}_sampled_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.sampled_reads[depth][read_pair].reverse_reads.trimmed_sampled_fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
'{name}_sampled_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Sample if the forward output file does not already exist
if not os.path.isfile(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq) and \
not os.path.isfile(
sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
out, \
err, \
sample.sampled_reads[depth][read_pair].sample_cmd = \
bbtools.bbduk_trim(forward_in=sample.forward_fastq,
forward_out=sample.sampled_reads[depth][read_pair]
.trimmed_forwardfastq,
reverse_in=sample.reverse_fastq,
reverse_out=sample.sampled_reads[depth][read_pair]
.trimmed_reversefastq,
minlength=sample.sampled_reads[depth][read_pair]
.forward_reads.length,
forcetrimleft=0,
returncmd=True,
**{'ziplevel': '9',
'Xmx': self.mem})
# Update the JSON file
self.write_json(sample)
def sample_reads(self):
"""
For each PacBio assembly, sample reads from corresponding FASTQ files for appropriate forward and reverse
lengths and sequencing depths using reformat.sh from the bbtools suite
"""
logging.info('Read sampling')
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Set the name of the output directory
sample.sampled_reads[depth][read_pair].sampled_outputdir \
= os.path.join(sample.sampled_reads[depth][read_pair].outputdir, 'sampled')
# Set the name of the forward reads - include the depth and read length information
sample.sampled_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.sampled_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
logging.info(
'Sampling {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.sampled_reads[depth][read_pair].forward_reads.length,
rl=sample.sampled_reads[depth][read_pair].reverse_reads.length))
# Use the reformat method in the OLCTools bbtools wrapper
# Note that upsample=t is used to ensure that the target number of reads (samplereadstarget) is met
if not os.path.isfile(sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
out, \
err, \
sample.sampled_reads[depth][read_pair].sample_call = bbtools \
.reformat_reads(forward_in=sample.sampled_reads[depth][read_pair].trimmed_forwardfastq,
reverse_in=sample.sampled_reads[depth][read_pair].trimmed_reversefastq,
forward_out=sample.sampled_reads[depth][read_pair].forward_reads.fastq,
reverse_out=sample.sampled_reads[depth][read_pair].reverse_reads.fastq,
returncmd=True,
**{'samplereadstarget': sample.simulated_reads[depth][read_pair].num_reads,
'upsample': 't',
'minlength':
sample.sampled_reads[depth][read_pair].forward_reads.length,
'ziplevel': '9',
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the trimmed reads, as they are no longer necessary
# try:
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq)
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_reversefastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample)
def link_reads(self, analysistype):
"""
Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled'
"""
logging.info('Linking {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Create the output directories
genesippr_dir = os.path.join(self.path, 'genesippr', sample.name)
sample.genesippr_dir = genesippr_dir
make_path(genesippr_dir)
cowbat_dir = os.path.join(self.path, 'cowbat', sample.name)
sample.cowbat_dir = cowbat_dir
make_path(cowbat_dir)
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
# Link reads to both output directories
for output_dir in [genesippr_dir, cowbat_dir]:
# If the original reads are shorter than the specified read length, the FASTQ files will exist,
# but will be empty. Do not create links for these files
size = os.path.getsize(sample[read_type][depth][read_pair].forward_reads[fastq_type])
if size > 20:
# Create relative symlinks to the FASTQ files - use the relative path from the desired
# output directory to the read storage path e.g.
# ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
# is the relative path to the output_dir. The link name is the base name of the reads
# joined to the desired output directory e.g.
# output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
relative_symlink(sample[read_type][depth][read_pair].forward_reads[fastq_type],
output_dir)
# Original FASTQ files
relative_symlink(sample.forward_fastq,
output_dir)
relative_symlink(sample.reverse_fastq,
output_dir)
# Reverse reads
try:
size = os.path.getsize(sample[read_type][depth][read_pair].reverse_reads[fastq_type])
if size > 20:
relative_symlink(sample[read_type][depth][read_pair].reverse_reads[fastq_type],
output_dir)
except FileNotFoundError:
pass
def run_genesippr(self):
"""
Run GeneSippr on each of the samples
"""
from pathlib import Path
home = str(Path.home())
logging.info('GeneSippr')
# These unfortunate hard coded paths appear to be necessary
miniconda_path = os.path.join(home, 'miniconda3')
miniconda_path = miniconda_path if os.path.isdir(miniconda_path) else os.path.join(home, 'miniconda')
logging.debug(miniconda_path)
activate = 'source {mp}/bin/activate {mp}/envs/sipprverse'.format(mp=miniconda_path)
sippr_path = '{mp}/envs/sipprverse/bin/sippr.py'.format(mp=miniconda_path)
for sample in self.metadata:
logging.info(sample.name)
# Run the pipeline. Check to make sure that the serosippr report, which is created last, doesn't already exist
if not os.path.isfile(os.path.join(sample.genesippr_dir, 'reports', 'genesippr.csv')):
cmd = 'python {py_path} -o {outpath} -s {seqpath} -r {refpath} -F'\
.format(py_path=sippr_path,
outpath=sample.genesippr_dir,
seqpath=sample.genesippr_dir,
refpath=self.referencefilepath
)
logging.critical(cmd)
# Create another shell script to execute within the sipprverse conda environment
template = "#!/bin/bash\n{activate} && {cmd}".format(activate=activate,
cmd=cmd)
genesippr_script = os.path.join(sample.genesippr_dir, 'run_genesippr.sh')
with open(genesippr_script, 'w+') as file:
file.write(template)
# Modify the permissions of the script to allow it to be run on the node
self.make_executable(genesippr_script)
# Run shell script
os.system('/bin/bash {}'.format(genesippr_script))
# quit()
def parse_genesippr(self):
"""
"""
import pandas
for sample in self.metadata:
sample.genesippr_reports = sorted(glob(os.path.join(sample.genesippr_dir, 'reports', '*.csv')))
for report in sample.genesippr_reports:
# Extract the analysis type from the report name
report_name = os.path.splitext(os.path.basename(report))[0]
# A dictionary to store the parsed CSV file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the CSV file, and subsequently convert the pandas data frame to a dictionary
# (.to_dict()).
dictionary = pandas.read_csv(report).to_dict()
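# By default, to_dict() is keyed by column header, then by row index; the loop below inverts
# the nesting so that each row (primary key) maps to its column values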
# Iterate through the dictionary - each header from the CSV file
for header in dictionary:
# primary_key is the primary key, and value is the value of the cell for that
# primary key + header combination
for primary_key, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[primary_key].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[primary_key] = dict()
nesteddictionary[primary_key].update({header: value})
# Extract the strain name and simulation parameters from each row of the parsed report
strain = str()
for name, value in nesteddictionary.items():
# As strain name is not printed on every line, it is entered as 'nan' by pandas. This is a float.
if type(value['Strain']) is not float:
strain = value['Strain']
# Find the 'original' sample
if len(strain.split('_')) > 1:
strain, analysis_type, depth, forward_length, reverse_length = strain.split('_')
print(strain, analysis_type, depth, forward_length, reverse_length)
else:
print(strain)
@staticmethod
def make_executable(path):
"""
Takes a shell script and makes it executable (chmod +x)
:param path: path to shell script
"""
mode = os.stat(path).st_mode
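# Copy the read permission bits (mask 0o444) into the corresponding execute bits e.g. 0o644 -> 0o755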
mode |= (mode & 0o444) >> 2
os.chmod(path, mode)
# def run_cowbat(self):
# """
# Run COWBAT on all the samples
# """
# logging.info('COWBAT')
# # Create a MetadataObject to spoof ArgumentParser supplied arguments
# args = MetadataObject()
# args.referencefilepath = self.referencefilepath
# args.numreads = 2
# args.preprocess = False
# args.startingtime = self.start
# args.customsamplesheet = False
# args.threads = multiprocessing.cpu_count() - 1
# args.commit = b''
# args.homepath = ''
# for sample in self.metadata:
# args.sequencepath = sample.cowbat_dir
# # Run the pipeline
# cowbat = RunAssemble(args)
# cowbat.main()
def __init__(self, start, path, referencefilepath, debug):
"""
:param start: Time at which the analyses were started
:param path: Location in which analyses are to be performed
:param referencefilepath: Location of reference database
:param debug: Boolean for whether debug level logging is enabled, and whether a toy dataset should be used
"""
self.start = start
self.path = os.path.join(path)
self.referencefilepath = os.path.join(referencefilepath)
self.fastapath = os.path.join(self.path, 'fasta')
self.fastqpath = os.path.join(self.path, 'fastq')
self.debug = debug
if self.debug:
self.read_lengths = ['50_0']
self.read_depths = ['10']
else:
self.read_lengths = ['50_0', '50_50', '50_75', '50_100', '50_150', '50_250', '50_300',
'75_0', '75_50', '75_75', '75_100', '75_150', '75_250', '75_300',
'100_0', '100_50', '100_75', '100_100', '100_150', '100_250', '100_300',
'150_0', '150_50', '150_75', '150_100', '150_150', '150_250', '150_300',
'250_0', '250_50', '250_75', '250_100', '250_150', '250_250', '250_300',
'300_0', '300_50', '300_75', '300_100', '300_150', '300_250', '300_300']
self.read_depths = ['10', '20', '30', '40', '50']
self.straindict = dict()
self.strainset = set()
self.metadata = list()
self.mem = int(0.85 * float(psutil.virtual_memory().total))
if __name__ == '__main__':
# Parser for arguments
parser = ArgumentParser(description='Perform FASTQ read simulations and sampling from assembled genomes, and '
'corresponding FASTQ reads, respectively')
parser.add_argument('-p', '--path',
required=True,
help='Path to folder containing subdirectories with assemblies, and raw reads')
parser.add_argument('-r', '--referencefilepath',
required=True,
help='Provide the location of the folder containing the pipeline accessory files (reference '
'genomes, MLST data, etc.)')
parser.add_argument('-d', '--debug',
default=False,
action='store_true',
help='Run the pipeline in debug mode. Will enable more logging. Currently will use a '
'greatly decreased range of read lengths and read depths')
# Get the arguments into an object
arguments = parser.parse_args()
SetupLogging(debug=arguments.debug)
arguments.start = time()
prep = ReadPrep(start=arguments.start,
path=arguments.path,
referencefilepath=arguments.referencefilepath,
debug=arguments.debug)
prep.main()
logging.info('Analyses Complete!')
| mit |
rjw57/vagrant-ipython | ipython/profile_default/ipython_kernel_config.py | 1 | 14950 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = ''
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# A file to be run
# c.IPKernelApp.file_to_run = ''
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = ''
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# The IPython profile to use.
# c.IPKernelApp.profile = 'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
#
# c.IPKernelApp.transport = 'tcp'
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = ''
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
#
# c.IPythonKernel._execute_sleep = 0.0005
#
# c.IPythonKernel._poll_interval = 0.05
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
#
# c.ZMQInteractiveShell.ipython_dir = ''
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.separate_in = '\n'
#
# c.ZMQInteractiveShell.object_info_string_level = 0
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
#
# c.ZMQInteractiveShell.history_length = 10000
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 3.4.0 (default, Apr 11 2014, 13:05:18) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.0.0-rc1 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
#
# c.ZMQInteractiveShell.separate_out = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.debug = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
#
# c.ZMQInteractiveShell.quiet = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# Debug output in the Session
# c.Session.debug = False
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'vagrant'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# execution key, for extra authentication.
# c.Session.key = b''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
| mit |
moutai/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
plaes/numpy | numpy/lib/twodim_base.py | 2 | 22946 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(numpy.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must be >= 2-d."
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError, "Input must be >= 1-d."
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must >= 2-d."
k = k % 4
if k == 0: return m
elif k == 1: return fliplr(m).swapaxes(0,1)
elif k == 2: return fliplr(flipud(m))
else: return fliplr(m.swapaxes(0,1)) # k==3
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal, a positive value
refers to an upper diagonal, and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned array.
Returns
-------
I : ndarray (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
diag : Return a diagonal 2-D array using a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
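# Fill the k-th diagonal by striding through the flattened array: successive diagonal entries
# are M + 1 flat positions apart, and limiting the rows to :M-k stops the assignment from
# wrapping past the end of the diagonal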
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
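# Place v along the k-th diagonal using the same strided flat-index trick as in `eye` above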
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError, "Input must be 1- or 2-d."
def diagflat(v,k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set. The default is 0.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : Matlab workalike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k>=0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
Construct an array filled with ones at and below the given diagonal.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : (N,M) ndarray
Array with a lower triangle filled with ones, in other words
``T[i,j] == 1`` for ``j <= i + k``.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None: M = N
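# Element (i, j) is 1 where i - j >= -k, i.e. where j <= i + k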
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int
Diagonal above which to zero elements.
`k = 0` is the main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Construct a copy of a matrix with elements below the k-th diagonal zeroed.
Please refer to the documentation for `tril`.
See Also
--------
tril
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the i-th output column is the input vector to
the power of ``N - i - 1``. Such a matrix with a geometric progression
in each row is named Van Der Monde, or Vandermonde matrix, from
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
References
----------
.. [1] Wikipedia, "Vandermonde matrix",
http://en.wikipedia.org/wiki/Vandermonde_matrix
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None: N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N-1):
X[:,i] = x**(N-i-1)
return X
def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5,8), (6,), (9,))
We can now use the Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent)
<matplotlib.image.AxesImage object at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
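# If bins is neither a single count nor a pair of counts, treat it as a single array of bin
# edges shared by both dimensions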
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n,mask_func,k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n),int)
a = mask_func(m,k)
return where(a != 0)
def tril_indices(n,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n,tril,k)
def tril_indices_from(arr,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
See `tril_indices` for full details.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0],k)
def triu_indices(n,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n,triu,k)
def triu_indices_from(arr,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
See `triu_indices` for full details.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| bsd-3-clause |
juliebehr/gaff2xml | openmoltools/tests/test_openeye.py | 1 | 11143 | from nose.plugins.attrib import attr
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import numpy as np
from mdtraj.testing import eq
from unittest import skipIf
from openmoltools import utils, packmol
import os
import openmoltools.openeye
import pandas as pd
import mdtraj as md
from mdtraj.testing import raises
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(=C=[N-])C=[NH+]3"
try:
oechem = utils.import_("openeye.oechem")
if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
oequacpac = utils.import_("openeye.oequacpac")
if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
oeiupac = utils.import_("openeye.oeiupac")
if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
oeomega = utils.import_("openeye.oeomega")
if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
HAVE_OE = True
except Exception as e:
HAVE_OE = False
openeye_exception_message = str(e)
try:
import parmed
HAVE_PARMED = True
except ImportError:
HAVE_PARMED = False
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_keepconfs():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() >= 2, "Butanol should have multiple conformers."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
all_data = {}
for k, molecule in enumerate(m1.GetConfs()):
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(molecule)
all_data[k] = names_to_charges
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
# Build a table of charges indexed by conformer number and atom name
all_data = pd.DataFrame(all_data)
# The standard deviation along the conformer axis should be zero if all conformers have same charges
eq(all_data.std(1).values, np.zeros(m1.NumAtoms()), decimal=7)
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # Difference between the MDTraj and OpenEye charges, indexed by atom name
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_benzene():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
print(m1.NumConfs())
assert m1.NumConfs() == 1, "Benzene should have 1 conformer"
assert m1.NumAtoms() == 12, "Benzene should have 12 atoms"
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # Difference between the MDTraj and OpenEye charges, indexed by atom name
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_link_in_utils():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
with utils.enter_temp_directory():
# This function was moved from utils to openeye, so check that the old link still works.
utils.molecule_to_mol2(m1, "out.mol2")
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_smiles():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.iupac_to_oemol("butanol")
charged1 = openmoltools.openeye.get_charges(m1)
eq(charged0.NumAtoms(), charged1.NumAtoms())
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml():
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("ClC(Cl)(Cl)Cl")
charged1 = openmoltools.openeye.get_charges(m1)
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml([charged0, charged1])
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml_simulation():
"""Test converting toluene and benzene smiles to oemol to ffxml to openmm simulation."""
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("Cc1ccccc1")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("c1ccccc1")
charged1 = openmoltools.openeye.get_charges(m1)
ligands = [charged0, charged1]
n_atoms = [15,12]
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml(ligands)
eq(len(trajectories),len(ligands))
pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.01 * u.femtosecond
protein_traj = md.load(pdb_filename)
protein_traj.center_coordinates()
protein_top = protein_traj.top.to_openmm()
protein_xyz = protein_traj.openmm_positions(0)
for k, ligand in enumerate(ligands):
ligand_traj = trajectories[k]
ligand_traj.center_coordinates()
eq(ligand_traj.n_atoms, n_atoms[k])
eq(ligand_traj.n_frames, 1)
#Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
ligand_xyz = ligand_traj.openmm_positions(0)
ligand_top = ligand_traj.top.to_openmm()
ffxml.seek(0)
forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
model = app.modeller.Modeller(protein_top, protein_xyz)
model.add(ligand_top, ligand_xyz)
model.addSolvent(forcefield, padding=0.4 * u.nanometer)
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(model.positions)
print("running")
simulation.step(1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=True)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, keep_confs=1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@skipIf(not HAVE_PARMED, "Cannot test without Parmed Chemistry.")
@skipIf(packmol.PACKMOL_PATH is None, "Skipping testing of packmol conversion because packmol not found.")
@attr("parmed")
def test_binary_mixture_rename():
smiles_string0 = "CCCCCC"
smiles_string1 = "CCCCCCCCC"
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
mol2_filename0 = "./A.mol2"
frcmod_filename0 = "./A.frcmod"
mol2_filename1 = "./B.mol2"
frcmod_filename1 = "./B.frcmod"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
frcmod_filenames = [frcmod_filename0, frcmod_filename1]
prmtop_filename = "./box.prmtop"
inpcrd_filename = "./box.inpcrd"
openmoltools.openeye.smiles_to_antechamber(smiles_string0, mol2_filename0, frcmod_filename0)
openmoltools.openeye.smiles_to_antechamber(smiles_string1, mol2_filename1, frcmod_filename1)
openmoltools.utils.randomize_mol2_residue_names(gaff_mol2_filenames)
box_pdb_filename = "./box.pdb"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
n_monomers = [10, 20]
packed_trj = packmol.pack_box([md.load(mol2) for mol2 in gaff_mol2_filenames], n_monomers)
packed_trj.save(box_pdb_filename)
tleap_cmd = openmoltools.amber.build_mixture_prmtop(gaff_mol2_filenames, frcmod_filenames, box_pdb_filename, prmtop_filename, inpcrd_filename)
prmtop = app.AmberPrmtopFile(prmtop_filename)
inpcrd = app.AmberInpcrdFile(inpcrd_filename)
system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.0*u.nanometers, constraints=app.HBonds)
| gpl-2.0 |
wdurhamh/statsmodels | statsmodels/regression/mixed_linear_model.py | 19 | 91253 | """
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierarchical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
MJ Lindstrom, DM Bates (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. They are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
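For example (an illustrative sketch of the packing convention used by
`MixedLMParams.from_packed` below): with two fixed effects, two correlated
random effects and one variance component, the packed vector is
[beta_0, beta_1, c_00, c_10, c_11, v_0],
where (c_00, c_10, c_11) is the lower triangle (or its Cholesky factor) of
the random effects covariance in row-major order and v_0 is the variance
component parameter.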
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Choleky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
"""
import numpy as np
import statsmodels.base.model as base
from scipy.optimize import fmin_ncg, fmin_cg, fmin_bfgs, fmin
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools import data as data_tools
from scipy.stats.distributions import norm
from scipy import sparse
import pandas as pd
import patsy
from statsmodels.compat.collections import OrderedDict
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
def _dot(x, y):
"""
Returns the dot product of the arrays, works for sparse and dense.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.dot(x, y)
elif sparse.issparse(x):
return x.dot(y)
elif sparse.issparse(y):
return y.T.dot(x.T).T
# From numpy, adapted to work with sparse and dense arrays.
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
Doing it manually instead of using dynamic programming is
approximately 15 times faster.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return _dot(_dot(A, B), C)
else:
return _dot(A, _dot(B, C))
def _dotsum(x, y):
"""
Returns sum(x * y), where '*' is the pointwise product, computed
efficiently for dense and sparse matrices.
"""
if sparse.issparse(x):
return x.multiply(y).sum()
else:
# This way usually avoids allocating a temporary.
return np.dot(x.ravel(), y.ravel())
def _get_exog_re_names(self, exog_re):
"""
Passes through if given a list of names. Otherwise, gets pandas names
or creates some generic variable names as needed.
"""
if self.k_re == 0:
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif isinstance(exog_re, pd.Series) and exog_re.name is not None:
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
return ["Z{0}".format(k + 1) for k in range(exog_re.shape[1])]
class MixedLMParams(object):
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : integer
The number of covariates with fixed effects.
k_re : integer
The number of covariates with random coefficients (excluding
variance components).
k_vc : integer
The number of variance components parameters.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, k_vc):
self.k_fe = k_fe
self.k_re = k_re
self.k_re2 = k_re * (k_re + 1) // 2
self.k_vc = k_vc
self.k_tot = self.k_fe + self.k_re2 + self.k_vc
self._ix = np.tril_indices(self.k_re)
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
"""
Create a MixedLMParams object from packed parameter vector.
Parameters
----------
params : array-like
The model parameters packed into a single vector.
k_fe : integer
The number of covariates with fixed effects
k_re : integer
The number of covariates with random effects (excluding
variance components).
use_sqrt : boolean
If True, the random effects covariance matrix is provided
as its Cholesky factor, otherwise the lower triangle of
the covariance matrix is stored.
has_fe : boolean
If True, `params` contains fixed effects parameters.
Otherwise, the fixed effects parameters are set to zero.
Returns
-------
A MixedLMParams object.
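Examples
--------
A minimal unpacking sketch (the numbers are arbitrary):
>>> import numpy as np
>>> packed = np.r_[0.5, 1.0, 0.2, 2.0]  # one slope, 2x2 lower triangle
>>> pa = MixedLMParams.from_packed(packed, k_fe=1, k_re=2,
...                                use_sqrt=False, has_fe=True)
>>> # pa.fe_params is [0.5]; pa.cov_re is [[1.0, 0.2], [0.2, 2.0]].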
"""
k_re2 = int(k_re * (k_re + 1) / 2)
# The number of covariance parameters.
if has_fe:
k_vc = len(params) - k_fe - k_re2
else:
k_vc = len(params) - k_re2
pa = MixedLMParams(k_fe, k_re, k_vc)
cov_re = np.zeros((k_re, k_re))
ix = pa._ix
if has_fe:
pa.fe_params = params[0:k_fe]
cov_re[ix] = params[k_fe:k_fe+k_re2]
else:
pa.fe_params = np.zeros(k_fe)
cov_re[ix] = params[0:k_re2]
if use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
pa.cov_re = cov_re
if k_vc > 0:
if use_sqrt:
pa.vcomp = params[-k_vc:]**2
else:
pa.vcomp = params[-k_vc:]
else:
pa.vcomp = np.array([])
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None, vcomp=None):
"""
Create a MixedLMParams object from each parameter component.
Parameters
----------
fe_params : array-like
The fixed effects parameter (a 1-dimensional array). If
None, there are no fixed effects.
cov_re : array-like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array-like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
vcomp : array-like
The variance component parameters. If None, there are no
variance components.
Returns
-------
A MixedLMParams object.
"""
if vcomp is None:
vcomp = np.empty(0)
if fe_params is None:
fe_params = np.empty(0)
if cov_re is None and cov_re_sqrt is None:
cov_re = np.empty((0, 0))
k_fe = len(fe_params)
k_vc = len(vcomp)
k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0]
pa = MixedLMParams(k_fe, k_re, k_vc)
pa.fe_params = fe_params
if cov_re_sqrt is not None:
pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
elif cov_re is not None:
pa.cov_re = cov_re
pa.vcomp = vcomp
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
obj.fe_params = self.fe_params.copy()
obj.cov_re = self.cov_re.copy()
obj.vcomp = self.vcomp.copy()
return obj
def get_packed(self, use_sqrt, has_fe=False):
"""
Return the model parameters packed into a single vector.
Parameters
----------
use_sqrt : bool
If True, the Cholesky square root of `cov_re` is
included in the packed result. Otherwise the
lower triangle of `cov_re` is included.
has_fe : bool
If True, the fixed effects parameters are included
in the packed result, otherwise they are omitted.
"""
if self.k_re > 0:
if use_sqrt:
L = np.linalg.cholesky(self.cov_re)
cpa = L[self._ix]
else:
cpa = self.cov_re[self._ix]
else:
cpa = np.zeros(0)
if use_sqrt:
vcomp = np.sqrt(self.vcomp)
else:
vcomp = self.vcomp
if has_fe:
pa = np.concatenate((self.fe_params, cpa, vcomp))
else:
pa = np.concatenate((cpa, vcomp))
return pa
def _smw_solver(s, A, AtA, BI, di):
"""
Solves the system (s*I + A*B*A') * x = rhs for an arbitrary rhs.
The inverse matrix of B is block diagonal. The upper left block
is BI and the lower right block is a diagonal matrix containing
di.
Parameters
----------
s : scalar
See above for usage
A : ndarray
See above for usage
AtA : square ndarray
A.T * A
BI : square symmetric ndarray
The upper left block of B^-1.
di : array-like
The diagonal elements of the lower right block of B^-1.
Returns
-------
A function that takes `rhs` as an input argument and returns a
solution to the linear system defined above.
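Notes
-----
A small numerical check of the identity, using arbitrary illustrative
values (B is rebuilt from its block-wise inverse):
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> A = rng.normal(size=(5, 3))
>>> BI = np.eye(2)             # inverse of the upper left block of B
>>> di = np.r_[2.0]            # diagonal of the lower right block of B^-1
>>> B = np.diag(np.r_[1.0, 1.0, 1 / di])
>>> s = 1.5
>>> V = s * np.eye(5) + A.dot(B).dot(A.T)
>>> f = _smw_solver(s, A, A.T.dot(A), BI, di)
>>> rhs = rng.normal(size=5)
>>> ok = np.allclose(f(rhs), np.linalg.solve(V, rhs))  # True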
"""
# Use SMW identity
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(A):
qi = sparse.linalg.inv(qmat)
qmati = A.dot(qi.T).T
else:
qmati = np.linalg.solve(qmat, A.T)
def solver(rhs):
if sparse.issparse(A):
ql = qmati.dot(rhs)
ql = A.dot(ql)
else:
ql = np.dot(qmati, rhs)
ql = np.dot(A, ql)
rslt = rhs / s - ql / s**2
if sparse.issparse(rslt):
rslt = np.asarray(rslt.todense())
return rslt
return solver
def _smw_logdet(s, A, AtA, BI, di, B_logdet):
"""
Returns the log determinant of s*I + A*B*A'.
Uses the matrix determinant lemma to accelerate the calculation.
Parameters
----------
s : scalar
See above for usage
A : ndarray
See above for usage
AtA : square matrix
A.T * A
BI : square symmetric ndarray
The upper left block of B^-1.
di : array-like
The diagonal elements of the lower right block of B^-1.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
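Notes
-----
A small numerical check using the same arbitrary values as in the
`_smw_solver` docstring:
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> A = rng.normal(size=(5, 3))
>>> BI, di = np.eye(2), np.r_[2.0]
>>> B = np.diag(np.r_[1.0, 1.0, 1 / di])
>>> s = 1.5
>>> V = s * np.eye(5) + A.dot(B).dot(A.T)
>>> ld = _smw_logdet(s, A, A.T.dot(A), BI, di, np.linalg.slogdet(B)[1])
>>> ok = np.isclose(ld, np.linalg.slogdet(V)[1])  # True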
"""
p = A.shape[0]
ld = p * np.log(s)
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(qmat):
qmat = qmat.todense()
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Parameters
----------
endog : 1d array-like
The dependent variable
exog : 2d array-like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array-like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array-like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
exog_vc : dict-like
A dictionary containing specifications of the variance
component terms. See below for details.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes
-----
`exog_vc` is a dictionary of dictionaries. Specifically,
`exog_vc[a][g]` is a matrix whose columns are linearly combined
using independent random coefficients. This random term then
contributes to the variance structure of the data for group `g`.
The random coefficients all have mean zero, and have the same
variance. The matrix must be `m x k`, where `m` is the number of
observations in group `g`. The number of columns may differ among
the top-level groups.
The covariates in `exog`, `exog_re` and `exog_vc` may (but need
not) partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
Examples
--------
A basic mixed model with fixed effects for the columns of
``exog`` and a random intercept for each distinct value of
``group``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
correlated random coefficients for the columns of ``exog_re``:
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
independent random coefficients for the columns of ``exog_re``:
>>> free = MixedLMParams.from_components(fe_params=np.ones(exog.shape[1]),
cov_re=np.eye(exog_re.shape[1]))
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit(free=free)
A different way to specify independent random coefficients for the
columns of ``exog_re``. In this example ``groups`` must be a
Pandas Series with compatible indexing with ``exog_re``, and
``exog_re`` has two columns.
>>> g = groups.groupby(groups).groups
>>> vc = {}
>>> vc['1'] = {k : exog_re.loc[g[k], 0] for k in g}
>>> vc['2'] = {k : exog_re.loc[g[k], 1] for k in g}
>>> model = sm.MixedLM(endog, exog, groups, exog_vc=vc)
>>> result = model.fit()
"""
def __init__(self, endog, exog, groups, exog_re=None,
exog_vc=None, use_sqrt=True, missing='none',
**kwargs):
_allowed_kwargs = ["missing_idx", "design_info", "formula"]
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("argument %s not permitted for MixedLM initialization" % x)
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
# Needs to run early so that the names are sorted.
self._setup_vcomp(exog_vc)
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if (exog is not None and
data_tools._is_using_ndarray_type(exog, None) and
exog.ndim == 1):
exog = exog[:, None]
if (exog_re is not None and
data_tools._is_using_ndarray_type(exog_re, None) and
exog_re.ndim == 1):
exog_re = exog_re[:, None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self._init_keys.extend(["use_sqrt", "exog_vc"])
self.k_fe = exog.shape[1] # Number of fixed effects parameters
if exog_re is None and exog_vc is None:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
self.data.param_names = self.exog_names + ['Group RE']
elif exog_re is not None:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if self.exog_re.ndim == 1:
self.exog_re = self.exog_re[:, None]
# Model dimensions
# Number of random effect covariates
self.k_re = self.exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
# All random effects are variance components
self.k_re = 0
self.k_re2 = 0
if not self.data._param_names:
# HACK: could've been set in from_formula already
# needs refactor
(param_names, exog_re_names,
exog_re_names_full) = self._make_param_names(exog_re)
self.data.param_names = param_names
self.data.exog_re_names = exog_re_names
self.data.exog_re_names_full = exog_re_names_full
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i,g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
if self.exog_re is None:
self.exog_re2_li = None
else:
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.nobs = len(self.endog)
self.n_totobs = self.nobs
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Precompute this
self._aex_r = []
self._aex_r2 = []
for i in range(self.n_groups):
a = self._augment_exog(i)
self._aex_r.append(a)
self._aex_r2.append(_dot(a.T, a))
# Precompute this
self._lin, self._quad = self._reparam()
def _setup_vcomp(self, exog_vc):
if exog_vc is None:
exog_vc = {}
self.exog_vc = exog_vc
self.k_vc = len(exog_vc)
vc_names = list(set(exog_vc.keys()))
vc_names.sort()
self._vc_names = vc_names
def _make_param_names(self, exog_re):
"""
Returns the full parameter names list, just the exogenous random
effects variables, and the exogenous random effects variables with
the interaction terms.
"""
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(self, exog_re)
param_names = []
jj = self.k_fe
for i in range(len(exog_re_names)):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " RE")
else:
param_names.append(exog_re_names[j] + " RE x " +
exog_re_names[i] + " RE")
jj += 1
vc_names = [x + " RE" for x in self._vc_names]
return exog_names + param_names + vc_names, exog_re_names, param_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
subset=None, use_sparse=False, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
vc_formula : dict-like
Formulas describing variance components. `vc_formula[vc]` is
the formula for the component with variance parameter named
`vc`. The formula is processed into a matrix, and the columns
of this matrix are linearly combined with independent random
coefficients having mean zero and a common variance.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula terms,
e.g., a numpy structured or rec array, a dictionary, or a pandas
DataFrame. `args` and `kwargs` are passed on to the model
instantiation.
If the variance component is intended to produce random
intercepts for disjoint subsets of a group, specified by
string labels or a categorical data value, always use '0 +' in
the formula so that no overall intercept is included.
If the variance components specify random slopes and you do
not also want a random group-level intercept in the model,
then use '0 +' in the formula to exclude the intercept.
The variance components formulas are processed separately for
each group. If a variable is categorical the results will not
be affected by whether the group labels are distinct or
re-used over the top-level groups.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
Examples
--------
Suppose we have an educational data set with students nested
in classrooms nested in schools. The students take a test,
and we want to relate the test scores to the students' ages,
while accounting for the effects of classrooms and schools.
The school will be the top-level group, and the classroom is a
nested group that is specified as a variance component. Note
that the schools may have different numbers of classrooms, and
the classroom labels may (but need not) differ across the schools.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age', vc_formula=vc,
re_formula='1', groups='school', data=data)
Now suppose we also have a previous test score called
'pretest'. If we want the relationship between pretest
scores and the current test to vary by classroom, we can
specify a random slope for the pretest score
>>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1', groups='school', data=data)
The following model is almost equivalent to the previous one,
but here the classroom random intercept and pretest slope may
be correlated.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1 + pretest', groups='school',
data=data)
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument in MixedLM.from_formula")
# If `groups` is a variable name, retrieve the data for the
# groups variable.
group_name = "Group"
if type(kwargs["groups"]) == str:
group_name = kwargs["groups"]
kwargs["groups"] = np.asarray(data[kwargs["groups"]])
if re_formula is not None:
if re_formula.strip() == "1":
# Work around Patsy bug, fixed by 0.3.
exog_re = np.ones((data.shape[0], 1))
exog_re_names = ["Group"]
else:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re = np.asarray(exog_re)
if exog_re.ndim == 1:
exog_re = exog_re[:, None]
else:
exog_re = None
if vc_formula is None:
exog_re_names = ["groups"]
else:
exog_re_names = []
if vc_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_vc = {}
data["_group"] = kwargs["groups"]
gb = data.groupby("_group")
kylist = list(gb.groups.keys())
kylist.sort()
for vc_name in vc_formula.keys():
exog_vc[vc_name] = {}
for group_ix, group in enumerate(kylist):
ii = gb.groups[group]
vcg = vc_formula[vc_name]
mat = patsy.dmatrix(vcg, data.loc[ii, :], eval_env=eval_env,
return_type='dataframe')
if use_sparse:
exog_vc[vc_name][group] = sparse.csr_matrix(mat)
else:
exog_vc[vc_name][group] = np.asarray(mat)
exog_vc = exog_vc
else:
exog_vc = None
mod = super(MixedLM, cls).from_formula(formula, data,
subset=None,
exog_re=exog_re,
exog_vc=exog_vc,
*args, **kwargs)
# expand re names to account for pairs of RE
(param_names,
exog_re_names,
exog_re_names_full) = mod._make_param_names(exog_re_names)
mod.data.param_names = param_names
mod.data.exog_re_names = exog_re_names
mod.data.exog_re_names_full = exog_re_names_full
mod.data.vcomp_names = mod._vc_names
return mod
def predict(self, params, exog=None):
"""
Return predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a mixed linear model. Can be either a
MixedLMParams instance, or a vector containing the packed
model parameters in which the fixed effects parameters are
at the beginning of the vector, or a vector containing
only the fixed effects parameters.
exog : array-like, optional
Design / exogenous data for the fixed effects. Model exog
is used if None.
Returns
-------
An array of fitted values. Note that these predicted values
only reflect the fixed effects mean structure of the model.
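Examples
--------
A minimal sketch, assuming ``endog``, ``exog`` and ``groups`` hold the
data and ``statsmodels.api`` is imported as ``sm``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
>>> yhat = model.predict(result.fe_params)  # fixed effects mean only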
"""
if exog is None:
exog = self.exog
if isinstance(params, MixedLMParams):
params = params.fe_params
else:
params = params[0:self.k_fe]
return np.dot(exog, params)
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array is None:
return None
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
method : string or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array-like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_kwargs : keywords
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
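Examples
--------
A sketch of penalized fitting, assuming ``endog``, ``exog`` and
``groups`` hold the data; the penalty weight is arbitrary:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit_regularized(method='l1', alpha=1.0)
>>> result.fe_params  # some coefficients may be shrunk exactly to zero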
"""
if type(method) == str and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
vcomp = mdf.vcomp
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
resid = resid_all[self.row_indices[group]]
solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
x = exog[:, j]
u = solver(x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)
results = MixedLMResults(self, params_prof, pcov / scale)
results.params_object = params_object
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
return MixedLMResultsWrapper(results)
def get_fe_params(self, cov_re, vcomp):
"""
Use GLS to update the fixed effects parameter estimates.
Parameters
----------
cov_re : array-like
The covariance matrix of the random effects.
vcomp : array-like
The variance component parameters.
Returns
-------
The GLS estimates of the fixed effects parameters.
"""
if self.k_fe == 0:
return np.array([])
if self.k_re == 0:
cov_re_inv = np.empty((0,0))
else:
cov_re_inv = np.linalg.inv(cov_re)
# Cache these quantities that don't change.
if not hasattr(self, "_endex_li"):
self._endex_li = []
for group_ix, _ in enumerate(self.group_labels):
mat = np.concatenate((self.exog_li[group_ix], self.endog_li[group_ix][:, None]), axis=1)
self._endex_li.append(mat)
xtxy = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
u = solver(self._endex_li[group_ix])
xtxy += np.dot(exog.T, u)
fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])
return fe_params
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
transformed parameters (i.e. with the Cholesky square root
covariance and square root transformed variance components),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
k_tot = k_fe + k_re2 + k_vc
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
for k in range(k_vc):
lin.append(np.zeros(k_tot))
quad = []
# Quadratic terms for fixed effects.
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
# Quadratic terms for random effects covariance.
ii = np.tril_indices(k_re)
ix = [(a,b) for a,b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
# Quadratic terms for variance components.
km = k_fe + k_re2
for k in range(km, km+k_vc):
quad[k][k, k] = 1
return lin, quad
def _expand_vcomp(self, vcomp, group):
"""
Replicate variance parameters to match a group's design.
Parameters
----------
vcomp : array-like
The variance parameters for the variance components.
group : string
The group label
Returns
-------
An expanded version of vcomp, in which each variance
parameter is copied as many times as there are independent
realizations of the variance component in the given group.
"""
if len(vcomp) == 0:
return np.empty(0)
vc_var = []
for j, k in enumerate(self._vc_names):
if group in self.exog_vc[k]:
vc_var.append(vcomp[j] * np.ones(self.exog_vc[k][group].shape[1]))
if len(vc_var) > 0:
return np.concatenate(vc_var)
else:
return np.empty(0)
def _augment_exog(self, group_ix):
"""
Concatenate the columns for variance components to the columns
for other random effects to obtain a single random effects
exog matrix for a given group.
"""
ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
if self.k_vc == 0:
return ex_r
group = self.group_labels[group_ix]
ex = [ex_r] if self.k_re > 0 else []
any_sparse = False
for j,k in enumerate(self._vc_names):
if group not in self.exog_vc[k]:
continue
ex.append(self.exog_vc[k][group])
any_sparse |= sparse.issparse(ex[-1])
if any_sparse:
for j, x in enumerate(ex):
if not sparse.issparse(x):
ex[j] = sparse.csr_matrix(x)
ex = sparse.hstack(ex)
ex = sparse.csr_matrix(ex)
else:
ex = np.concatenate(ex, axis=1)
return ex
def loglike(self, params, profile_fe=True):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array-like.
The parameter value. If array-like, must be a packed
parameter vector containing only the covariance
parameters.
profile_fe : boolean
If True, replace the provided value of `fe_params` with
the GLS estimates.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
The scale parameter `scale` is always profiled out of the
log-likelihood. In addition, if `profile_fe` is true the
fixed effects parameters are also profiled out.
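For example, the profile log-likelihood can be evaluated at the fitted
parameters (a sketch assuming ``result`` is a fitted MixedLMResults
instance):
>>> llf = result.model.loglike(result.params_object, profile_fe=False)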
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
cov_re = params.cov_re
vcomp = params.vcomp
# Move to the profile set
if profile_fe:
fe_params = self.get_fe_params(cov_re, vcomp)
else:
fe_params = params.fe_params
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
else:
cov_re_inv = np.zeros((0, 0))
cov_re_logdet = 0
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if (self.cov_pen is not None) and (self.k_re > 0):
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if (self.fe_pen is not None):
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
resid = resid_all[self.row_indices[group]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var, cov_aug_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = solver(resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = solver(exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_,ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def _gen_dV_dPar(self, ex_r, solver, group, max_ix=None):
"""
A generator that yields the element-wise derivative of the
marginal covariance matrix with respect to the random effects
variance and covariance parameters.
Parameters
----------
ex_r : array-like
The random effects design matrix
solver : function
A function that given x returns V^{-1}x, where V
is the group's marginal covariance matrix.
group : scalar
The group label
max_ix : integer or None
If not None, the generator ends when this index
is reached.
"""
axr = solver(ex_r)
# Regular random effects
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
mat_l, mat_r = ex_r[:,j1:j1+1], ex_r[:,j2:j2+1] # Need 2d
vsl, vsr = axr[:,j1:j1+1], axr[:,j2:j2+1]
yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
jj += 1
# Variance components
for ky in self._vc_names:
if group in self.exog_vc[ky]:
if max_ix is not None and jj > max_ix:
return
mat = self.exog_vc[ky][group]
axmat = solver(mat)
yield jj, mat, mat, axmat, axmat, True
jj += 1
def score(self, params, profile_fe=True):
"""
Returns the score vector of the profile log-likelihood.
Notes
-----
The score vector that is returned is computed with respect to
the parameterization defined by this model instance's
`use_sqrt` attribute.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
if profile_fe:
params.fe_params = self.get_fe_params(params.cov_re, params.vcomp)
if self.use_sqrt:
score_fe, score_re, score_vc = self.score_sqrt(params, calc_fe=not profile_fe)
else:
score_fe, score_re, score_vc = self.score_full(params, calc_fe=not profile_fe)
if self._freepat is not None:
score_fe *= self._freepat.fe_params
score_re *= self._freepat.cov_re[self._freepat._ix]
score_vc *= self._freepat.vcomp
if profile_fe:
return np.concatenate((score_re, score_vc))
else:
return np.concatenate((score_fe, score_re, score_vc))
def score_full(self, params, calc_fe):
"""
Returns the score with respect to untransformed parameters.
Calculates the score vector for the profiled log-likelihood of
the mixed effects model with respect to the parameterization
in which the random effects covariance matrix is represented
in its full form (not using the Cholesky factor).
Parameters
----------
params : MixedLMParams or array-like
The parameter at which the score function is evaluated.
If array-like, must contain the packed random effects
parameters (cov_re and vcomp) without fe_params.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
Notes
-----
`score_re` is taken with respect to the parameterization in
which `cov_re` is represented through its lower triangle
(without taking the Cholesky square root).
"""
fe_params = params.fe_params
cov_re = params.cov_re
vcomp = params.vcomp
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
score_fe = np.zeros(self.k_fe)
score_re = np.zeros(self.k_re2)
score_vc = np.zeros(self.k_vc)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.grad(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if calc_fe and (self.fe_pen is not None):
score_fe -= self.fe_pen.grad(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{-1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th
# covariance parameter.
xtax = [0.,] * (self.k_re2 + self.k_vc)
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2 + self.k_vc)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2 + self.k_vc)
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
if self.reml:
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
vir = solver(resid)
for jj, matl, matr, vsl, vsr, sym in self._gen_dV_dPar(ex_r, solver, group):
dlv[jj] = _dotsum(matr, vsl)
if not sym:
dlv[jj] += _dotsum(matl, vsr)
ul = _dot(vir, matl)
ur = ul.T if sym else _dot(matr.T, vir)
ulr = np.dot(ul, ur)
rvavr[jj] += ulr
if not sym:
rvavr[jj] += ulr.T
if self.reml:
ul = _dot(viexog.T, matl)
ur = ul.T if sym else _dot(matr.T, viexog)
ulr = np.dot(ul, ur)
xtax[jj] += ulr
if not sym:
xtax[jj] += ulr.T
# Contribution of log|V| to the covariance parameter
# gradient.
if self.k_re > 0:
score_re -= 0.5 * dlv[0:self.k_re2]
if self.k_vc > 0:
score_vc -= 0.5 * dlv[self.k_re2:]
rvir += np.dot(resid, vir)
if calc_fe:
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.k_fe
if calc_fe and self.k_fe > 0:
score_fe += fac * xtvir / rvir
if self.k_re > 0:
score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
if self.k_vc > 0:
score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir
if self.reml:
xtvixi = np.linalg.inv(xtvix)
for j in range(self.k_re2):
score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
for j in range(self.k_vc):
score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])
return score_fe, score_re, score_vc
def score_sqrt(self, params, calc_fe=True):
"""
Returns the score with respect to transformed parameters.
Calculates the score vector with respect to the
parameterization in which the random effects covariance matrix
is represented through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array-like
The model parameters. If array-like must contain packed
parameters that are compatible with this model instance.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
"""
score_fe, score_re, score_vc = self.score_full(params, calc_fe=calc_fe)
params_vec = params.get_packed(use_sqrt=True, has_fe=True)
score_full = np.concatenate((score_fe, score_re, score_vc))
scr = 0.
for i in range(len(params_vec)):
v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
scr += score_full[i] * v
score_fe = scr[0:self.k_fe]
score_re = scr[self.k_fe:self.k_fe + self.k_re2]
score_vc = scr[self.k_fe + self.k_re2:]
return score_fe, score_re, score_vc
def hessian(self, params):
"""
Returns the model's Hessian matrix.
Calculates the Hessian matrix for the linear mixed effects
model with respect to the parameterization in which the
covariance matrix is represented directly (without square-root
transformation).
Parameters
----------
params : MixedLMParams or array-like
The model parameters at which the Hessian is calculated.
If array-like, must contain the packed parameters in a
form that is compatible with this model instance.
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt,
has_fe=True)
fe_params = params.fe_params
vcomp = params.vcomp
cov_re = params.cov_re
if self.k_re > 0:
cov_re_inv = np.linalg.inv(cov_re)
else:
cov_re_inv = np.empty((0, 0))
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2 + self.k_vc, self.k_re2 + self.k_vc))
hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0.,] * (self.k_re2 + self.k_vc)
m = self.k_re2 + self.k_vc
B = np.zeros(m)
D = np.zeros((m, m))
F = [[0.] * m for k in range(m)]
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[k]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
vir = solver(resid)
rvir += np.dot(resid, vir)
for jj1, matl1, matr1, vsl1, vsr1, sym1 in self._gen_dV_dPar(ex_r, solver, group):
ul = _dot(viexog.T, matl1)
ur = _dot(matr1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if not sym1:
ul = _dot(viexog.T, matr1)
ur = _dot(matl1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if self.reml:
ul = _dot(viexog.T, matl1)
ur = ul if sym1 else np.dot(viexog.T, matr1)
ulr = _dot(ul, ur.T)
xtax[jj1] += ulr
if not sym1:
xtax[jj1] += ulr.T
ul = _dot(vir, matl1)
ur = ul if sym1 else _dot(vir, matr1)
B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)
# V^{-1} * dV/d_theta
E = [(vsl1, matr1)]
if not sym1:
E.append((vsr1, matl1))
for jj2, matl2, matr2, vsl2, vsr2, sym2 in self._gen_dV_dPar(ex_r, solver, group, jj1):
re = sum([_multi_dot_three(matr2.T, x[0], x[1].T) for x in E])
vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re), vir[:, None])
if not sym2:
le = sum([_multi_dot_three(matl2.T, x[0], x[1].T) for x in E])
vt += 2 * _dot(_multi_dot_three(vir[None, :], matr2, le), vir[:, None])
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
rt = _dotsum(vsl2, re.T) / 2
if not sym2:
rt += _dotsum(vsr2, le.T) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
ev = sum([_dot(x[0], _dot(x[1].T, viexog)) for x in E])
u1 = _dot(viexog.T, matl2)
u2 = _dot(matr2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
if not sym2:
u1 = np.dot(viexog.T, matr2)
u2 = np.dot(matl2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
hess_fe -= fac * xtvix / rvir
hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
QL = [np.linalg.solve(xtvix, x) for x in xtax]
for j1 in range(self.k_re2 + self.k_vc):
for j2 in range(j1 + 1):
a = _dotsum(QL[j1].T, QL[j2])
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2 + self.k_vc
hess = np.zeros((m, m))
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess
def get_scale(self, fe_params, cov_re, vcomp):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Parameters
----------
fe_params : array-like
The regression slope estimates
cov_re : 2d array-like
Estimate of the random effects covariance matrix
vcomp : array-like
Estimate of the variance components
Returns
-------
scale : float
The estimated error variance.
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
qf = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
mat = solver(resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def fit(self, start_params=None, reml=True, niter_sa=0,
do_cg=True, fe_pen=None, cov_pen=None, free=None,
full_output=False, method='bfgs', **kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params : array-like or MixedLMParams
Starting values for the profile log-likelihood. If not a
`MixedLMParams` instance, this should be an array
containing the packed parameters for the profile
log-likelihood, including the fixed effects
parameters.
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
fe_pen : Penalty object
A penalty on the fixed effects
free : MixedLMParams object
If not `None`, this is a mask that allows parameters to be
held fixed at specified values. A 1 indicates that the
corresponding parameter is estimated, a 0 indicates that
it is fixed at its starting value. Setting the `cov_re`
component to the identity matrix fits a model with
independent random effects. Note that some optimization
methods do not respect this constraint (bfgs and lbfgs both
work).
full_output : bool
If true, attach iteration history to results
method : string
Optimization method.
Returns
-------
A MixedLMResults instance.
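Examples
--------
A sketch using `free` to constrain the random effects covariance
matrix to be diagonal, assuming ``endog``, ``exog``, ``exog_re`` and
``groups`` hold the data:
>>> import numpy as np
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> free = MixedLMParams.from_components(
...     fe_params=np.ones(model.k_fe), cov_re=np.eye(model.k_re))
>>> result = model.fit(free=free)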
"""
_allowed_kwargs = ['gtol', 'maxiter']
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("Argument %s not allowed for MixedLM.fit" % x)
if method.lower() in ["newton", "ncg"]:
raise ValueError("method %s not available for MixedLM" % method)
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._freepat = free
if full_output:
hist = []
else:
hist = None
success = False
if start_params is None:
params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
params.fe_params = np.zeros(self.k_fe)
params.cov_re = np.eye(self.k_re)
params.vcomp = np.ones(self.k_vc)
else:
if isinstance(start_params, MixedLMParams):
params = start_params
else:
# It's a packed array
if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=True)
elif len(start_params) == self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
else:
raise ValueError("invalid start_params")
if do_cg:
kwargs["retall"] = hist is not None
if "disp" not in kwargs:
kwargs["disp"] = False
packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)
# It seems that the optimizers sometimes stop too soon, so
# we run a few times.
for rep in range(5):
rslt = super(MixedLM, self).fit(start_params=packed,
skip_hessian=True,
method=method,
**kwargs)
if rslt.mle_retvals['converged']:
break
packed = rslt.params
# The optimization succeeded
params = np.atleast_1d(rslt.params)
if hist is not None:
hist.append(rslt.mle_retvals)
converged = rslt.mle_retvals['converged']
if not converged:
msg = "Gradient optimization failed."
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt, has_fe=False)
cov_re_unscaled = params.cov_re
vcomp_unscaled = params.vcomp
fe_params = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
params.fe_params = fe_params
scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
cov_re = scale * cov_re_unscaled
vcomp = scale * vcomp_unscaled
if (((self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)) or
((self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01))):
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
# Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
hess = self.hessian(params)
hess_diag = np.diag(hess)
if free is not None:
pcov = np.zeros_like(hess)
pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
ii = np.flatnonzero(pat)
hess_diag = hess_diag[ii]
if len(ii) > 0:
hess1 = hess[np.ix_(ii, ii)]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
if np.any(hess_diag >= 0):
msg = "The Hessian matrix at the estimated parameter values is not positive definite."
warnings.warn(msg, ConvergenceWarning)
# Prepare a results class instance
params_packed = params.get_packed(use_sqrt=False, has_fe=True)
results = MixedLMResults(self, params_packed, pcov / scale)
results.params_object = params
results.fe_params = fe_params
results.cov_re = cov_re
results.vcomp = vcomp
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = converged
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
results.use_sqrt = self.use_sqrt
results.freepat = self._freepat
return MixedLMResultsWrapper(results)
class MixedLMResults(base.LikelihoodModelResults, base.ResultMixin):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
model : class instance
Pointer to MixedLM model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
fe_params : array
The fitted fixed-effects coefficients
re_params : array
The fitted random-effects covariance matrix
bse_fe : array
The standard errors of the fitted fixed effects coefficients
bse_re : array
The standard errors of the fitted random effects covariance
matrix
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
self.nobs = self.model.nobs
self.df_resid = self.nobs - np_matrix_rank(self.model.exog)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values for the model.
The fitted values reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
fit = np.dot(self.model.exog, self.fe_params)
re = self.random_effects
for group_ix, group in enumerate(self.model.group_labels):
ix = self.model.row_indices[group]
mat = [self.model.exog_re_li[group_ix]]
for c in self.model._vc_names:
if group in self.model.exog_vc[c]:
mat.append(self.model.exog_vc[c][group])
mat = np.concatenate(mat, axis=1)
fit[ix] += np.dot(mat, re[group])
return fit
@cache_readonly
def resid(self):
"""
Returns the residuals for the model.
The residuals reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
@cache_readonly
def bse_re(self):
"""
Returns the standard errors of the variance parameters. Note
that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def _expand_re_names(self, group):
names = list(self.model.data.exog_re_names)
for v in self.model._vc_names:
if group in self.model.exog_vc[v]:
ix = range(self.model.exog_vc[v][group].shape[1])
na = ["%s[%d]" % (v, j + 1) for j in ix]
names.extend(na)
return names
@cache_readonly
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
means of the random effects for the group.
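For example (a sketch assuming ``result`` is a fitted MixedLMResults
instance):
>>> re = result.random_effects
>>> re[result.model.group_labels[0]]  # pandas Series for one group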
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(xtvir, index=self._expand_re_names(group))
return ranef_dict
@cache_readonly
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, label)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(label)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict
# Need to override since t-tests are only used for fixed effects parameters.
def t_test(self, r_matrix, scale=None, use_t=None):
"""
Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array-like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
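Examples
--------
A sketch testing that the first two fixed effects are equal, assuming
``result`` is a fitted MixedLMResults instance with at least two fixed
effects:
>>> import numpy as np
>>> R = np.zeros((1, result.k_fe))
>>> R[0, 0], R[0, 1] = 1, -1
>>> result.t_test(R)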
"""
if r_matrix.shape[1] != self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns" % self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super(MixedLMResults, self).t_test(r_matrix, scale=scale, use_t=use_t)
return tst_rslt
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
-----------
yname : string, optional
Default is `y`
xname_fe : list of strings, optional
Fixed effects covariate names
xname_re : list of strings, optional
Random effects covariate names
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = OrderedDict()
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=self.model.data.param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
@cache_readonly
def llf(self):
return self.model.loglike(self.params_object, profile_fe=False)
@cache_readonly
def aic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df)
@cache_readonly
def bic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1.):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : integer
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : string
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : integer
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : integer
The number of points at which to calculate the likelihood
            above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog, groups = pmodel.endog, pmodel.exog, pmodel.groups
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype == 're':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model._vc_names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype == 're':
mat = mat[np.ix_(ix, ix)]
if vtype == 're':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype == 're':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype == 're':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev
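# A minimal profile-likelihood sketch for ``MixedLMResults.profile_re``;
# ``res`` stands for a hypothetical fitted MixedLMResults instance with at
# least one random effect, and the distances used here are illustrative only.
#
#   >>> likev = res.profile_re(0, 're', num_low=4, dist_low=0.1,
#   ...                        num_high=4, dist_high=0.1)
#   >>> likev[:, 0]   # constrained variance values
#   >>> likev[:, 1]   # corresponding profile log-likelihoods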
class MixedLMResultsWrapper(base.LikelihoodResultsWrapper):
_attrs = {'bse_re': ('generic_columns', 'exog_re_names_full'),
'fe_params': ('generic_columns', 'xnames'),
'bse_fe': ('generic_columns', 'xnames'),
'cov_re': ('generic_columns_2d', 'exog_re_names'),
'cov_re_unscaled': ('generic_columns_2d', 'exog_re_names'),
}
_upstream_attrs = base.LikelihoodResultsWrapper._wrap_attrs
_wrap_attrs = base.wrap.union_dicts(_attrs, _upstream_attrs)
_methods = {}
_upstream_methods = base.LikelihoodResultsWrapper._wrap_methods
_wrap_methods = base.wrap.union_dicts(_methods, _upstream_methods)
| bsd-3-clause |
ml-lab/pylearn2 | pylearn2/train_extensions/tests/test_roc_auc.py | 17 | 5778 | """
Tests for ROC AUC.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_roc_auc():
"""Test RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_roc_auc_one_vs_rest():
"""Test one vs. rest RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_ovr)
trainer.main_loop()
def test_roc_auc_one_vs_one():
"""Test one vs. rest RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_ovo)
trainer.main_loop()
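# The YAML fixtures below wire RocAucChannel into a one-epoch BGD run; the
# quantity each channel monitors is, roughly, scikit-learn's ROC AUC computed
# on the monitoring set.  A hedged stand-alone equivalent, with hypothetical
# arrays ``y_true`` (binary labels) and ``y_score`` (predicted scores):
#
#   >>> from sklearn.metrics import roc_auc_score
#   >>> roc_auc_score(y_true, y_score)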
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_y_roc_auc,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {},
],
}
"""
test_yaml_ovr = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 3,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_y_roc_auc,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0vX,
positive_class_index: 0,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-1vX,
positive_class_index: 1,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-2vX,
positive_class_index: 2,
},
],
}
"""
test_yaml_ovo = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 3,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_y_roc_auc,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0v1,
positive_class_index: 0,
negative_class_index: 1,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0v2,
positive_class_index: 0,
negative_class_index: 2,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-1v2,
positive_class_index: 1,
negative_class_index: 2,
},
],
}
"""
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/tests/test_multiclass.py | 7 | 24017 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
    # Test that check_classification_targets raises a ValueError whose
    # message names the invalid target type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't have all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
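# In other words, for sample i and class k the OvO decision value is the vote
# count plus a confidence term that is scaled so it cannot overturn a whole
# vote:
#
#   decisions[i, k] = votes[i, k] + scaled_sum_of_confidences[i, k]
#
# which is why ``np.round(decisions)`` recovers the raw votes in the
# assertions above, while the fractional part breaks most of the ties.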
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
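# A minimal stand-alone sketch mirroring the ECOC fixtures above; the exact
# score depends on the random code book, so treat the call as illustrative.
#
#   >>> ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2,
#   ...                             random_state=0)
#   >>> ecoc.fit(iris.data, iris.target).score(iris.data, iris.target)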
| bsd-3-clause |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
       callbacks.process('drink', 123) # will call ondrink
       callbacks.process('eat', 456) # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
    # placeholder for a proper implementation later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
    homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
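# A couple of illustrative values for the implementation above (classic
# soundex behaviour: similar-sounding surnames collapse to the same code):
#
#   >>> soundex('Robert')
#   'R163'
#   >>> soundex('Rupert')
#   'R163'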
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
            self.__class__ = RingBuffer.__Full  # a bare __Full would be name-mangled and raise NameError
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
        len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
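# A small illustration of the behaviour described in the docstring: the
# indentation of the first non-blank line (4 spaces here) is stripped from
# every line, while deeper indentation is preserved relative to it.
#
#   >>> dedent('\n    Hello\n      world\n    again')
#   'Hello\n  world\nagain'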
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
    Recurse through all the files and dirs in *args*, ignoring symbolic links,
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
    Return all attributes of *o* whose names contain the string *match*.
    If *case* is True, the match is case sensitive.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
mem = int(a2[1].split()[0])
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
    >>> g = Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
        # Mark each group as we come across it by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
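# Hedged usage sketch (an assumption, not in the original module): Grouper
# stores weak references, so joined objects must be weak-referenceable; plain
# str instances are not, which is why this sketch joins instances of a class.
def _grouper_demo():
    class _Item(object):
        def __init__(self, name):
            self.name = name
    a, b, c, d = _Item('a'), _Item('b'), _Item('c'), _Item('d')
    g = Grouper()
    g.join(a, b)
    g.join(b, c)
    assert g.joined(a, c)
    assert not g.joined(a, d)
    return [[item.name for item in group] for group in g]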
def simple_linear_interpolation(a, steps):
    # use an integer step count so that the slice strides below are valid
    steps = int(np.floor(steps))
    new_length = ((len(a) - 1) * steps) + 1
    new_shape = list(a.shape)
    new_shape[0] = new_length
    result = np.zeros(new_shape, a.dtype)
    result[0] = a[0]
    a0 = a[0:-1]
    a1 = a[1:]
    delta = (a1 - a0) / float(steps)
    for i in range(1, steps):
        result[i::steps] = delta * i + a0
    result[steps::steps] = a1
    return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: #Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
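# Hedged usage sketch (assumption): drop rows that are masked in the first
# argument or non-finite in the second; both returned arrays keep only the
# surviving rows (indices 0 and 3 here).
def _delete_masked_points_demo():
    x = ma.array([0., 1., 2., 3.], mask=[0, 1, 0, 0])
    y = np.array([10., 20., np.nan, 40.])
    xgood, ygood = delete_masked_points(x, y)
    return xgood, ygood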
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
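# Hedged sketch (assumption): a runnable version of the docstring example.
def _unmasked_index_ranges_demo():
    y = ma.array(np.arange(5), mask=[0, 0, 1, 0, 0])
    ii = unmasked_index_ranges(ma.getmaskarray(y))                    # [[0, 2], [2, 4]]
    jj = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)  # [[0, 2], [3, 5]]
    return ii, jj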
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.vector_lengths(X, P=P, axis=axis)
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
| gpl-3.0 |
thilbern/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
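# Hedged aside (not part of the original example): a quick numeric comparison
# of the three fits via mean squared error on the training data.
for name, pred in (('rbf', y_rbf), ('linear', y_lin), ('poly', y_poly)):
    print("%s kernel MSE: %.4f" % (name, np.mean((y - pred) ** 2)))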
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.hold('on')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_take.py | 8 | 19159 | # -*- coding: utf-8 -*-
import re
from datetime import datetime
import nose
import numpy as np
from pandas.compat import long
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.tslib import iNaT
_multiprocess_can_split_ = True
class TestTake(tm.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na, writeable=True):
data = np.random.randint(0, 2, 4).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
for writeable in [True, False]:
# Check that take_nd works both with writeable arrays (in which
# case fast typed memoryviews implementation) and read-only
# arrays alike.
_test_dtype(np.float64, True, writeable=writeable)
_test_dtype(np.float32, True, writeable=writeable)
_test_dtype(np.uint64, False, writeable=writeable)
_test_dtype(np.uint32, False, writeable=writeable)
_test_dtype(np.uint16, False, writeable=writeable)
_test_dtype(np.uint8, False, writeable=writeable)
_test_dtype(np.int64, False, writeable=writeable)
_test_dtype(np.int32, False, writeable=writeable)
_test_dtype(np.int16, False, writeable=writeable)
_test_dtype(np.int8, False, writeable=writeable)
_test_dtype(np.object_, True, writeable=writeable)
_test_dtype(np.bool, False, writeable=writeable)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert ((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert (result[3] == fill_value)
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert ((result[[0, 1, 2, 3]] == data[indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na, writeable=True):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
for writeable in [True, False]:
# Check that take_nd works both with writeable arrays (in which
# case fast typed memoryviews implementation) and read-only
# arrays alike.
_test_dtype(np.float64, True, writeable=writeable)
_test_dtype(np.float32, True, writeable=writeable)
_test_dtype(np.uint64, False, writeable=writeable)
_test_dtype(np.uint32, False, writeable=writeable)
_test_dtype(np.uint16, False, writeable=writeable)
_test_dtype(np.uint8, False, writeable=writeable)
_test_dtype(np.int64, False, writeable=writeable)
_test_dtype(np.int32, False, writeable=writeable)
_test_dtype(np.int16, False, writeable=writeable)
_test_dtype(np.int8, False, writeable=writeable)
_test_dtype(np.object_, True, writeable=writeable)
_test_dtype(np.bool, False, writeable=writeable)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
assert ((result[3, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
assert ((result[:, 3] == fill_value).all())
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_3d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_3d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
assert ((result[3, :, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
assert ((result[:, 3, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=2,
fill_value=fill_value)
assert ((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
assert ((result[:, :, 3] == fill_value).all())
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=2,
fill_value=fill_value)
assert ((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
self.assert_numpy_array_equal(result, expected)
result = algos.take_1d(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_bool(self):
arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
self.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
self.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
# this now accepts a float32! # test with float64 out buffer
out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
algos.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(
long(11045376), long(11360736), (5, 3)) * 100000000000
arr = arr.view(dtype='datetime64[ns]')
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=0,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, out=result2, axis=0,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=1,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, out=result2, axis=1,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
kiyoto/statsmodels | examples/python/interactions_anova.py | 25 | 10584 |
## Interactions and ANOVA
# Note: This script is based heavily on Jonathan Taylor's class notes http://www.stanford.edu/class/stats191/interactions.html
#
# Download and format data:
from __future__ import print_function
from statsmodels.compat import urlopen
import numpy as np
np.set_printoptions(precision=4, suppress=True)
import statsmodels.api as sm
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.api import interaction_plot, abline_plot
from statsmodels.stats.anova import anova_lm
try:
salary_table = pd.read_csv('salary.table')
except: # recent pandas can read URL without urlopen
url = 'http://stats191.stanford.edu/data/salary.table'
fh = urlopen(url)
salary_table = pd.read_table(fh)
salary_table.to_csv('salary.table')
E = salary_table.E
M = salary_table.M
X = salary_table.X
S = salary_table.S
# Take a look at the data:
plt.figure(figsize=(6,6))
symbols = ['D', '^']
colors = ['r', 'g', 'blue']
factor_groups = salary_table.groupby(['E','M'])
for values, group in factor_groups:
i,j = values
plt.scatter(group['X'], group['S'], marker=symbols[j], color=colors[i-1],
s=144)
plt.xlabel('Experience');
plt.ylabel('Salary');
# Fit a linear model:
formula = 'S ~ C(E) + C(M) + X'
lm = ols(formula, salary_table).fit()
print(lm.summary())
# Have a look at the created design matrix:
lm.model.exog[:5]
# Or since we initially passed in a DataFrame, we have a DataFrame available in
lm.model.data.orig_exog[:5]
# We keep a reference to the original untouched data in
lm.model.data.frame[:5]
# Influence statistics
infl = lm.get_influence()
print(infl.summary_table())
# or get a dataframe
df_infl = infl.summary_frame()
df_infl[:5]
# Now plot the residuals within the groups separately:
resid = lm.resid
plt.figure(figsize=(6,6));
for values, group in factor_groups:
i,j = values
group_num = i*2 + j - 1 # for plotting purposes
x = [group_num] * len(group)
plt.scatter(x, resid[group.index], marker=symbols[j], color=colors[i-1],
s=144, edgecolors='black')
plt.xlabel('Group')
plt.ylabel('Residuals')
# Now we will test some interactions using anova or f_test
interX_lm = ols("S ~ C(E) * X + C(M)", salary_table).fit()
print(interX_lm.summary())
# Do an ANOVA check
from statsmodels.stats.api import anova_lm
table1 = anova_lm(lm, interX_lm)
print(table1)
interM_lm = ols("S ~ X + C(E)*C(M)", data=salary_table).fit()
print(interM_lm.summary())
table2 = anova_lm(lm, interM_lm)
print(table2)
# The design matrix as a DataFrame
interM_lm.model.data.orig_exog[:5]
# The design matrix as an ndarray
interM_lm.model.exog
interM_lm.model.exog_names
infl = interM_lm.get_influence()
resid = infl.resid_studentized_internal
plt.figure(figsize=(6,6))
for values, group in factor_groups:
i,j = values
idx = group.index
plt.scatter(X[idx], resid[idx], marker=symbols[j], color=colors[i-1],
s=144, edgecolors='black')
plt.xlabel('X');
plt.ylabel('standardized resids');
# Looks like one observation is an outlier.
drop_idx = abs(resid).argmax()
print(drop_idx) # zero-based index
idx = salary_table.index.drop(drop_idx)
lm32 = ols('S ~ C(E) + X + C(M)', data=salary_table, subset=idx).fit()
print(lm32.summary())
print('\n')
interX_lm32 = ols('S ~ C(E) * X + C(M)', data=salary_table, subset=idx).fit()
print(interX_lm32.summary())
print('\n')
table3 = anova_lm(lm32, interX_lm32)
print(table3)
print('\n')
interM_lm32 = ols('S ~ X + C(E) * C(M)', data=salary_table, subset=idx).fit()
table4 = anova_lm(lm32, interM_lm32)
print(table4)
print('\n')
# Replot the residuals
resid = interM_lm32.get_influence().summary_frame()['standard_resid']
plt.figure(figsize=(6,6))
for values, group in factor_groups:
i,j = values
idx = group.index
plt.scatter(X[idx], resid[idx], marker=symbols[j], color=colors[i-1],
s=144, edgecolors='black')
plt.xlabel('X[~[32]]');
plt.ylabel('standardized resids');
# Plot the fitted values
lm_final = ols('S ~ X + C(E)*C(M)', data = salary_table.drop([drop_idx])).fit()
mf = lm_final.model.data.orig_exog
lstyle = ['-','--']
plt.figure(figsize=(6,6))
for values, group in factor_groups:
i,j = values
idx = group.index
plt.scatter(X[idx], S[idx], marker=symbols[j], color=colors[i-1],
s=144, edgecolors='black')
# drop NA because there is no idx 32 in the final model
plt.plot(mf.X[idx].dropna(), lm_final.fittedvalues[idx].dropna(),
ls=lstyle[j], color=colors[i-1])
plt.xlabel('Experience');
plt.ylabel('Salary');
# From our first look at the data, the difference between Master's and PhD in the management group is different from the difference in the non-management group. This is an interaction between the two qualitative variables management (M) and education (E). We can visualize it by first removing the effect of experience, then plotting the means within each of the six groups using interaction_plot.
U = S - X * interX_lm32.params['X']
plt.figure(figsize=(6,6))
interaction_plot(E, M, U, colors=['red','blue'], markers=['^','D'],
markersize=10, ax=plt.gca())
# ## Minority Employment Data
try:
jobtest_table = pd.read_table('jobtest.table')
except: # don't have data already
url = 'http://stats191.stanford.edu/data/jobtest.table'
jobtest_table = pd.read_table(url)
factor_group = jobtest_table.groupby(['ETHN'])
plt.figure(figsize=(6,6))
colors = ['purple', 'green']
markers = ['o', 'v']
for factor, group in factor_group:
plt.scatter(group['TEST'], group['JPERF'], color=colors[factor],
marker=markers[factor], s=12**2)
plt.xlabel('TEST');
plt.ylabel('JPERF');
min_lm = ols('JPERF ~ TEST', data=jobtest_table).fit()
print(min_lm.summary())
plt.figure(figsize=(6,6));
for factor, group in factor_group:
plt.scatter(group['TEST'], group['JPERF'], color=colors[factor],
marker=markers[factor], s=12**2)
plt.xlabel('TEST')
plt.ylabel('JPERF')
abline_plot(model_results = min_lm, ax=plt.gca());
min_lm2 = ols('JPERF ~ TEST + TEST:ETHN',
data=jobtest_table).fit()
print(min_lm2.summary())
plt.figure(figsize=(6,6));
for factor, group in factor_group:
plt.scatter(group['TEST'], group['JPERF'], color=colors[factor],
marker=markers[factor], s=12**2)
abline_plot(intercept = min_lm2.params['Intercept'],
slope = min_lm2.params['TEST'], ax=plt.gca(), color='purple');
abline_plot(intercept = min_lm2.params['Intercept'],
slope = min_lm2.params['TEST'] + min_lm2.params['TEST:ETHN'],
ax=plt.gca(), color='green');
min_lm3 = ols('JPERF ~ TEST + ETHN', data = jobtest_table).fit()
print(min_lm3.summary())
plt.figure(figsize=(6,6));
for factor, group in factor_group:
plt.scatter(group['TEST'], group['JPERF'], color=colors[factor],
marker=markers[factor], s=12**2)
abline_plot(intercept = min_lm3.params['Intercept'],
slope = min_lm3.params['TEST'], ax=plt.gca(), color='purple');
abline_plot(intercept = min_lm3.params['Intercept'] + min_lm3.params['ETHN'],
slope = min_lm3.params['TEST'], ax=plt.gca(), color='green');
min_lm4 = ols('JPERF ~ TEST * ETHN', data = jobtest_table).fit()
print(min_lm4.summary())
plt.figure(figsize=(6,6));
for factor, group in factor_group:
plt.scatter(group['TEST'], group['JPERF'], color=colors[factor],
marker=markers[factor], s=12**2)
abline_plot(intercept = min_lm4.params['Intercept'],
slope = min_lm4.params['TEST'], ax=plt.gca(), color='purple');
abline_plot(intercept = min_lm4.params['Intercept'] + min_lm4.params['ETHN'],
slope = min_lm4.params['TEST'] + min_lm4.params['TEST:ETHN'],
ax=plt.gca(), color='green');
# is there any effect of ETHN on slope or intercept?
table5 = anova_lm(min_lm, min_lm4)
print(table5)
# is there any effect of ETHN on intercept
table6 = anova_lm(min_lm, min_lm3)
print(table6)
# is there any effect of ETHN on slope
table7 = anova_lm(min_lm, min_lm2)
print(table7)
# is it just the slope or both?
table8 = anova_lm(min_lm2, min_lm4)
print(table8)
# ## One-way ANOVA
try:
rehab_table = pd.read_csv('rehab.table')
except:
url = 'http://stats191.stanford.edu/data/rehab.csv'
rehab_table = pd.read_table(url, delimiter=",")
rehab_table.to_csv('rehab.table')
plt.figure(figsize=(6,6))
rehab_table.boxplot('Time', 'Fitness', ax=plt.gca())
rehab_lm = ols('Time ~ C(Fitness)', data=rehab_table).fit()
table9 = anova_lm(rehab_lm)
print(table9)
print(rehab_lm.model.data.orig_exog)
print(rehab_lm.summary())
# ## Two-way ANOVA
try:
kidney_table = pd.read_table('./kidney.table')
except:
url = 'http://stats191.stanford.edu/data/kidney.table'
kidney_table = pd.read_table(url, delimiter=" *")
# Explore the dataset
kidney_table.groupby(['Weight', 'Duration']).size()
# Balanced panel
kt = kidney_table
plt.figure(figsize=(6,6))
interaction_plot(kt['Weight'], kt['Duration'], np.log(kt['Days']+1),
colors=['red', 'blue'], markers=['D','^'], ms=10, ax=plt.gca())
# You have things available in the calling namespace available in the formula evaluation namespace
kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight)', data=kt).fit()
table10 = anova_lm(kidney_lm)
print(anova_lm(ols('np.log(Days+1) ~ C(Duration) + C(Weight)',
data=kt).fit(), kidney_lm))
print(anova_lm(ols('np.log(Days+1) ~ C(Duration)', data=kt).fit(),
ols('np.log(Days+1) ~ C(Duration) + C(Weight, Sum)',
data=kt).fit()))
print(anova_lm(ols('np.log(Days+1) ~ C(Weight)', data=kt).fit(),
ols('np.log(Days+1) ~ C(Duration) + C(Weight, Sum)',
data=kt).fit()))
# ## Sum of squares
#
# Illustrates the use of different types of sums of squares (I,II,II)
# and how the Sum contrast can be used to produce the same output between
# the 3.
#
# Types I and II are equivalent under a balanced design.
#
# Don't use Type III with non-orthogonal contrast - ie., Treatment
sum_lm = ols('np.log(Days+1) ~ C(Duration, Sum) * C(Weight, Sum)',
data=kt).fit()
print(anova_lm(sum_lm))
print(anova_lm(sum_lm, typ=2))
print(anova_lm(sum_lm, typ=3))
nosum_lm = ols('np.log(Days+1) ~ C(Duration, Treatment) * C(Weight, Treatment)',
data=kt).fit()
print(anova_lm(nosum_lm))
print(anova_lm(nosum_lm, typ=2))
print(anova_lm(nosum_lm, typ=3))
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/compat/numpy/function.py | 7 | 12722 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from numpy import ndarray
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
from pandas.errors import UnsupportedFunctionCall
from pandas.core.dtypes.common import is_integer, is_bool
from pandas.compat import OrderedDict
class CompatValidator(object):
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (self.max_fname_arg_count if
max_fname_arg_count is None
else max_fname_arg_count)
method = self.method if method is None else method
if method == 'args':
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == 'kwargs':
validate_kwargs(fname, kwargs, self.defaults)
elif method == 'both':
validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
self.defaults)
else:
raise ValueError("invalid validation method "
"'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
method='both', max_fname_arg_count=1)
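# Hedged usage sketch (an assumption, not part of pandas): how a method might
# route extra numpy-compatibility arguments through one of these validators.
# The names DEMO_DEFAULTS, validate_demo and demo_sum are illustrative only.
DEMO_DEFAULTS = dict(dtype=None, out=None)
validate_demo = CompatValidator(DEMO_DEFAULTS, fname='demo_sum',
                                method='both', max_fname_arg_count=1)
def demo_sum(values, *args, **kwargs):
    # raises if a caller passes non-default values for the numpy-only
    # 'dtype'/'out' arguments
    validate_demo(args, kwargs)
    return sum(values)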
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict()
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
max_fname_arg_count=0, method='both')
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict()
ARGSORT_DEFAULTS_KIND['axis'] = -1
ARGSORT_DEFAULTS_KIND['order'] = None
validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort',
max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS = dict(out=None)
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can takes an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
COMPRESS_DEFAULTS = OrderedDict()
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
method='both', max_fname_arg_count=1)
CUM_FUNC_DEFAULTS = OrderedDict()
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both',
max_fname_arg_count=1)
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
method='both', max_fname_arg_count=1)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
LOGICAL_FUNC_DEFAULTS = dict(out=None)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')
MINMAX_DEFAULTS = dict(out=None)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
method='both', max_fname_arg_count=1)
RESHAPE_DEFAULTS = dict(order='C')
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
method='both', max_fname_arg_count=1)
REPEAT_DEFAULTS = dict(axis=None)
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
method='both', max_fname_arg_count=1)
ROUND_DEFAULTS = dict(out=None)
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
method='both', max_fname_arg_count=1)
SORT_DEFAULTS = OrderedDict()
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
method='kwargs')
STAT_FUNC_DEFAULTS = OrderedDict()
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
method='kwargs')
validate_sum = CompatValidator(STAT_FUNC_DEFAULTS, fname='sum',
method='both', max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
method='both', max_fname_arg_count=1)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
method='kwargs')
TAKE_DEFAULTS = OrderedDict()
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
method='both', max_fname_arg_count=0)
def validate_transpose_for_generic(inst, kwargs):
try:
validate_transpose(tuple(), kwargs)
except ValueError as e:
klass = type(inst).__name__
msg = str(e)
# the Panel class actual relies on the 'axes' parameter if called
# via the 'numpy' library, so let's make sure the error is specific
# about saying that the parameter is not supported for particular
# implementations of 'transpose'
if "the 'axes' parameter is not supported" in msg:
msg += " for {klass} instances".format(klass=klass)
raise ValueError(msg)
def validate_window_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
RESAMPLER_NUMPY_OPS = ('min', 'max', 'sum', 'prod',
'mean', 'std', 'var')
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
| gpl-2.0 |
femtotrader/ig-markets-stream-api-python-library | tests/test_dealing.py | 2 | 1277 | from trading_ig.rest import IGService
import responses
import json
import pandas as pd
"""
unit tests for dealing methods
"""
class TestDealing:
# login v1
@responses.activate
def test_workingorders_happy(self):
with open('tests/data/workingorders.json', 'r') as file:
response_body = json.loads(file.read())
responses.add(responses.GET,
'https://demo-api.ig.com/gateway/deal/workingorders',
headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},
json=response_body,
status=200)
ig_service = IGService('username', 'password', 'api_key', 'DEMO')
ig_service.crud_session.HEADERS["LOGGED_IN"] = {}
result = ig_service.fetch_working_orders()
pd.set_option('display.max_columns', 50)
print(result)
assert isinstance(result, pd.DataFrame)
assert result.iloc[0]['instrumentName'] == 'Spot Gold'
assert result.iloc[0]['exchangeId'] == 'FX_C_GCSI_ST'
assert result.iloc[0]['marketStatus'] == 'EDITS_ONLY'
assert result.iloc[0]['level'] == 2000.0
assert result.iloc[0]['epic'] == 'CS.D.CFDGOLD.CFDGC.IP'
assert result.iloc[0]['currencyCode'] == 'USD'
| bsd-3-clause |
christobal54/aei-grad-school | bin/mosquito-plot-histograms.py | 1 | 7154 | #!/usr/bin/python
#####
# plots distributions of predictor variables
#####
import aei
import gdal
import numpy as np
from sklearn import tree
from sklearn import ensemble
from sklearn import svm
from sklearn import metrics
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
# set base directory for files
base = '/home/cba/Downloads/mosquito/'
# file for CR-wide data
cr_file = base + 'CCB-LC-Predictors-masked.tif'
cr_ref = gdal.Open(cr_file)
# file for Coto Brus data
cb_file = base + 'CCB-LC-Predictors-southern.tif'
cb_ref = gdal.Open(cb_file)
# file for current sampling data
fs_cb_file = base + 'CR-plot-locations-southern.tif'
fs_cb_ref = gdal.Open(fs_cb_file)
# file for Aedes aegyptii points
ae_file = base + 'CR-mosquito-locations-50m.tif'
ae_ref = gdal.Open(ae_file)
# set the colors and band names to plot
plt_bands = ["Tree Cover (%)", "Soil Cover (%)", "Veg. Cover (%)", "Impervious Cover (%)",
"Min. Temp (C)", "Median Temp (C)", "Max. Temp (C)"]
bands = ["TreeCover", "Soil-Cover", "Veg-Cover", "Impervious-Cover", "Temp-Min",
"Temp-Median", "Temp-Max"]
output_files = []
for band in bands:
output_files.append("{base}CR-{band}-plot.png".format(base = base, band = band))
cols = aei.color.color_blind(len(bands))
# set the models to apply
models = ["DecisionTree", "SVM", "RandomForest", "GradientBoosting"]
mods = [tree.DecisionTreeClassifier(), svm.SVC(), ensemble.RandomForestClassifier(),
ensemble.AdaBoostClassifier()]
output_models = []
for model in models:
output_models.append("{base}CR-southern-prediction-{model}.tif".format(base = base, model = model))
# grab the indices for the aedes aegyptii and field plot data
fs_cb_band = fs_cb_ref.ReadAsArray()
fs_cb = np.where(fs_cb_band == 1)
fs_cb_band = None
ae_band = ae_ref.ReadAsArray()
ae_ind = np.where(ae_band == 1)
ae_band = None
# count how many pixels in coto brus field sites to use as a limit on the
# number of random samples to pull from the national- and coto brus-wide data
nscr = int(2e5)
nfs = fs_cb[0].shape[0]
nae = ae_ind[0].shape[0]
print("Number of field plot samples: {}".format(nfs))
print("Number of aedes aegyptii samples: {}".format(nae))
# set up a variable to hold data to predict aedes occurrences
y = np.zeros(nae * 4)
y[0:nae] = 1
x = np.zeros((nae * 4, len(bands)))
# loop through each band and plot histograms of nation-wide data,
# coto brus-wide data, and the data from all plots
for i in range(cr_ref.RasterCount):
print("Processing band: {band}".format(band = bands[i]))
cr_band = cr_ref.GetRasterBand(i+1)
cb_band = cb_ref.GetRasterBand(i+1)
cr_nodata = cr_band.GetNoDataValue()
cb_nodata = cb_band.GetNoDataValue()
# read the national data into memory and extract the values to plot
cr_arr = cr_band.ReadAsArray()
# if this is the first band, select a random subset of pixels to sample and use that throughout
if i == 0:
cr_ind = np.where(cr_arr != cr_nodata)
cr_rnd = np.int32(np.floor(np.random.uniform(0, cr_ind[0].shape[0], nscr)))
cr_ae = np.int32(np.floor(np.random.uniform(0, cr_ind[0].shape[0], nae*3)))
# subset the national and aedes data and clear the rest out of memory
cr_data = cr_arr[cr_ind[0][cr_rnd], cr_ind[1][cr_rnd]]
ae_data = cr_arr[ae_ind[0], ae_ind[1]]
cr_ae_data = cr_arr[cr_ind[0][cr_ae], cr_ind[1][cr_ae]]
cr_arr = None
# read the coto brus data into memory and extract the values to plot
cb_arr = cb_band.ReadAsArray()
if i == 0:
cb_ind = np.where(cb_arr != cb_nodata)
# pull the field plot and coto brus data
fs_data = cb_arr[fs_cb[0], fs_cb[1]]
cb_rnd = np.int32(np.floor(np.random.uniform(0, cb_ind[0].shape[0], nscr)))
cb_data = cb_arr[cb_ind[0][cb_rnd], cb_ind[1][cb_rnd]]
cb_arr = None
# find the min and max for each data set to set bounds for plot
xmin = min(np.percentile(cr_data, 2), np.percentile(cb_data, 2),
np.percentile(fs_data, 2), np.percentile(ae_data, 2))
xmax = max(np.percentile(cr_data, 98), np.percentile(cb_data, 98),
np.percentile(fs_data, 98), np.percentile(ae_data, 98))
# get a density distribution for each
cr_dns = gaussian_kde(cr_data)
cb_dns = gaussian_kde(cb_data)
fs_dns = gaussian_kde(fs_data)
ae_dns = gaussian_kde(ae_data)
# set custom covariance to smooth multiple peaks
covar = 0.25
cr_dns.covariance_factor = lambda : covar
cb_dns.covariance_factor = lambda : covar
fs_dns.covariance_factor = lambda : covar
ae_dns.covariance_factor = lambda : covar
cr_dns._compute_covariance()
cb_dns._compute_covariance()
fs_dns._compute_covariance()
ae_dns._compute_covariance()
# set an x scale to plot smooth density distribution
xs = np.linspace(xmin, xmax, 200)
# plot each function in a single plot
#plt.figure()
#plt.plot(xs, cr_dns(xs), label = "Costa Rica", color = cols[0])
#plt.plot(xs, cb_dns(xs), label = "Southern CR", color = cols[1])
#plt.plot(xs, fs_dns(xs), label = "Field Plots", color = cols[2])
#plt.plot(xs, ae_dns(xs), label = "Aedes aegyptii", color = cols[3])
#plt.xlabel(plt_bands[i])
#plt.ylabel("Density")
#plt.title("Costa Rica {var} Distributions".format(var = bands[i]))
#plt.legend()
#plt.tight_layout()
#plt.savefig(output_files[i], dpi = 200)
#plt.close()
# add data to the predictor array
x[0:nae, i] = ae_data
x[nae:, i] = cr_ae_data
# that's all, folks!
# now, try and predict aedes occurrence!
# first, split into test/train data
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.3)
# then loop through the classifiers
for i in range(len(mods)):
print("----------")
print("Classifying using: {mod}".format(mod = models[i]))
# just runnin' on defaults here
model = mods[i]
model.fit(x_train[:,:-1], y_train)
# predict on the training data and report results
y_pred = model.predict(x_test[:,:-1])
report = metrics.classification_report(y_test, y_pred,
target_names = ['random sampling', 'aedes aegyptii'])
print(report)
if i in [0, 2, 3]:
print("Feature importance:")
for j in range(len(bands)-1):
print("{}: {:0.3f}%".format(bands[j], model.feature_importances_[j]))
# apply the prediction to the southern costa rica region
scarr = cb_ref.ReadAsArray()
scarr = scarr[:-1, cb_ind[0], cb_ind[1]]
newpred = model.predict(scarr.transpose())
newarr = np.zeros((cb_ref.RasterYSize, cb_ref.RasterXSize), np.byte)
newarr[cb_ind[0], cb_ind[1]] = newpred
# write to an output file
newref = gdal.GetDriverByName("GTiff").Create(output_models[i], cb_ref.RasterXSize,
cb_ref.RasterYSize, 1, gdal.GDT_Byte)
newref.SetGeoTransform(cb_ref.GetGeoTransform())
newref.SetProjection(cb_ref.GetProjection())
newband = newref.GetRasterBand(1)
newband.WriteArray(newarr)
newband = None
newarr = None
newref = None | mit |
macks22/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
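# For example, if labels contains the values {0, 1, 2, -1}, the noise label -1
# is discounted and n_clusters_ = 4 - 1 = 3.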
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/covariance/graph_lasso_.py | 11 | 23920 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
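# For reference, a hedged restatement of what ``_objective`` computes (a sketch
# of the standard graphical-lasso objective, not an alternative
# implementation): with S the empirical covariance and K the precision matrix,
# up to additive constants,
#
#   cost(K) = tr(S K) - log det(K) + alpha * sum_{i != j} |K_ij|
#
# i.e. a Gaussian negative log-likelihood plus an l1 penalty on the
# off-diagonal entries of K.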
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
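# A tiny worked example for ``alpha_max`` (an illustrative sketch, not a
# doctest that ships with scikit-learn): with the diagonal zeroed out, the
# largest absolute off-diagonal entry is returned.
#
#   >>> float(alpha_max(np.array([[2.0, 0.3], [0.3, 1.0]])))
#   0.3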
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
max_iter=100, verbose=False, return_costs=False,
eps=np.finfo(np.float).eps, return_n_iter=False):
"""l1-penalized covariance estimator
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars')
coefs = coefs[:, -1]
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
        warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
return_n_iter=True)
return self
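# A minimal usage sketch for the estimator above (illustrative only; the toy
# data below is an assumption, not part of this module):
#
#   >>> import numpy as np
#   >>> from sklearn.covariance import GraphLasso
#   >>> X = np.random.RandomState(0).randn(60, 5)
#   >>> model = GraphLasso(alpha=0.05).fit(X)
#   >>> model.covariance_.shape, model.precision_.shape
#   ((5, 5), (5, 5))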
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
        The list of regularization parameters, in decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False,
assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose,
return_n_iter=True)
return self
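# A minimal usage sketch for GraphLassoCV (illustrative only; the toy data and
# the chosen grid settings are assumptions):
#
#   >>> import numpy as np
#   >>> from sklearn.covariance import GraphLassoCV
#   >>> X = np.random.RandomState(42).randn(100, 4)
#   >>> cv_model = GraphLassoCV(alphas=4, n_refinements=2).fit(X)
#   >>> cv_model.covariance_.shape  # cv_model.alpha_ holds the selected penalty
#   (4, 4)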
| mit |
belltailjp/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
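# The reference implementation above uses the classical perceptron update: on a
# misclassified sample (x_i, y_i) it sets w <- w + y_i * x_i and b <- b + y_i.
# A tiny worked check (an illustrative assumption, not part of the test suite):
#
#   >>> toy = MyPerceptron(n_iter=10)
#   >>> toy.fit(np.array([[2.0, 0.0], [-2.0, 0.0]]), np.array([1, -1]))
#   >>> toy.predict(np.array([[3.0, 0.0], [-3.0, 0.0]]))
#   array([ 1., -1.])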
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
Migelo/mpa_garching | 1/nrc_vs_z.py | 1 | 2806 | import pygad as pg
import pygad.plotting
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import utils
import glob
from multiprocessing import Pool
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
filename = __file__
def plot(args):
halo = args[0]
definition = args[1]
path = '/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_???' % (halo, halo)
max = int(sorted(glob.glob(path))[-1][-3:])
s, h, g = pg.prepare_zoom('/ptmp/mpa/naab/REFINED/%s/SF_X/4x-2phase/out/snap_%s_4x_%s' % (halo, halo, max), gas_trace='/u/mihac/data/%s/4x-2phase/gastrace_%s' % (halo, definition), star_form=None)
metals_ejection = [item[item > 0] for item in s.gas['metals_at_ejection'][s.gas['num_recycled'] > -1] / s.gas['mass_at_ejection'][s.gas['num_recycled'] > -1]]
metals_infall = [item[item > 0] for item in s.gas['metals_at_infall'][s.gas['num_recycled'] > -1] / s.gas['mass_at_infall'][s.gas['num_recycled'] > -1]]
infall_time = [item[item > 0] for item in s.gas['infall_time'][s.gas['num_recycled'] > -1]]
ejection_time = [item[item > 0] for item in s.gas['ejection_time'][s.gas['num_recycled'] > -1]]
number_of_recycles = s.gas['num_recycled'][s.gas['num_recycled'] > -1]
most_recent_z = []
for i, item in enumerate(metals_infall):
metals = np.vstack([np.column_stack([item, infall_time[i]]), np.column_stack([metals_ejection[i], ejection_time[i]])])
most_recent_z.append(metals[np.argmax(metals[:, 1])][0])
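    # most_recent_z now holds, for each recycled gas particle, the metallicity
    # recorded at its latest infall or ejection event (whichever happened last)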
average_nor, edges_nor, count_nor = stats.binned_statistic(most_recent_z, number_of_recycles, statistic='mean', bins=np.linspace(0, .1, 100))
upper_lim = np.percentile(number_of_recycles, 99.98) + 2
bins, y_edge, x_edge = np.histogram2d(number_of_recycles, most_recent_z,
bins=[np.logspace(-4, -1, 31), np.arange(-.5, upper_lim, 1)][::-1])
vmin, vmax = np.percentile(utils.finite(np.log10(bins.flatten())), [0, 95])
f, ax = plt.subplots(1, figsize=utils.figsize[::-1])
abc = ax.pcolormesh(x_edge, y_edge, np.log10(bins), vmin=vmin, vmax=vmax)
cax = inset_axes(ax, width="70%", height="3%", loc=1)
cbar = f.colorbar(abc, cax=cax, orientation='horizontal')
cbar.set_label('$log_{10}(count)$', color='w')
cbar_obj = plt.getp(cbar.ax.axes, 'xticklabels')
plt.setp(cbar_obj, color='w')
ax.set_ylim((y_edge[0], y_edge[-1]))
ax.set_xlabel("z")
ax.set_ylabel("number of recycles")
ax.set_xscale('log')
ax.plot(edges_nor[: -1], average_nor, color='k')
f.tight_layout()
plt.subplots_adjust(top=0.93)
f.suptitle('%s - %s' % (halo, definition), fontsize=20)
plt.savefig(filename.split("/")[-1][:-3] + '_' + halo + '_' + definition + ".png", bbox_inches='tight')
p = Pool(4)
p.map(plot, utils.combinations)
| mit |
GehenHe/Recognize-Face-on-Android | tensorflow/tools/dist_test/python/census_widendeep.py | 54 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
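# In the DNNLinearCombinedClassifier constructed further below, the sparse base
# and crossed columns above feed the linear ("wide") part of the model, while
# the embedding and real-valued columns feed the DNN ("deep") part.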
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
    feature_cols = dict(list(continuous_cols.items()) +
                        list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
BryanCutler/spark | python/pyspark/pandas/tests/test_namespace.py | 1 | 13605 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from pyspark.pandas.namespace import _get_index_map
class NamespaceTest(ReusedSQLTestCase, SQLTestUtils):
def test_from_pandas(self):
pdf = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf, pdf)
pser = pdf.year
kser = ps.from_pandas(pser)
self.assert_eq(kser, pser)
pidx = pdf.index
kidx = ps.from_pandas(pidx)
self.assert_eq(kidx, pidx)
pmidx = pdf.set_index("year", append=True).index
kmidx = ps.from_pandas(pmidx)
self.assert_eq(kmidx, pmidx)
expected_error_message = "Unknown data type: {}".format(type(kidx).__name__)
with self.assertRaisesRegex(ValueError, expected_error_message):
ps.from_pandas(kidx)
def test_to_datetime(self):
pdf = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
kdf = ps.from_pandas(pdf)
dict_from_pdf = pdf.to_dict()
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(kdf))
self.assert_eq(pd.to_datetime(dict_from_pdf), ps.to_datetime(dict_from_pdf))
self.assert_eq(pd.to_datetime(1490195805, unit="s"), ps.to_datetime(1490195805, unit="s"))
self.assert_eq(
pd.to_datetime(1490195805433502912, unit="ns"),
ps.to_datetime(1490195805433502912, unit="ns"),
)
self.assert_eq(
pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01")),
ps.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01")),
)
def test_date_range(self):
self.assert_eq(
ps.date_range(start="1/1/2018", end="1/08/2018"),
pd.date_range(start="1/1/2018", end="1/08/2018"),
)
self.assert_eq(
ps.date_range(start="1/1/2018", periods=8), pd.date_range(start="1/1/2018", periods=8)
)
self.assert_eq(
ps.date_range(end="1/1/2018", periods=8), pd.date_range(end="1/1/2018", periods=8)
)
self.assert_eq(
ps.date_range(start="2018-04-24", end="2018-04-27", periods=3),
pd.date_range(start="2018-04-24", end="2018-04-27", periods=3),
)
self.assert_eq(
ps.date_range(start="1/1/2018", periods=5, freq="M"),
pd.date_range(start="1/1/2018", periods=5, freq="M"),
)
self.assert_eq(
ps.date_range(start="1/1/2018", periods=5, freq="3M"),
pd.date_range(start="1/1/2018", periods=5, freq="3M"),
)
self.assert_eq(
ps.date_range(start="1/1/2018", periods=5, freq=pd.offsets.MonthEnd(3)),
pd.date_range(start="1/1/2018", periods=5, freq=pd.offsets.MonthEnd(3)),
)
self.assert_eq(
ps.date_range(start="2017-01-01", end="2017-01-04", closed="left"),
pd.date_range(start="2017-01-01", end="2017-01-04", closed="left"),
)
self.assert_eq(
ps.date_range(start="2017-01-01", end="2017-01-04", closed="right"),
pd.date_range(start="2017-01-01", end="2017-01-04", closed="right"),
)
self.assertRaises(
AssertionError, lambda: ps.date_range(start="1/1/2018", periods=5, tz="Asia/Tokyo")
)
self.assertRaises(
AssertionError, lambda: ps.date_range(start="1/1/2018", periods=5, freq="ns")
)
self.assertRaises(
AssertionError, lambda: ps.date_range(start="1/1/2018", periods=5, freq="N")
)
def test_concat_index_axis(self):
pdf = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5], "C": [6, 7, 8]})
# TODO: pdf.columns.names = ["ABC"]
kdf = ps.from_pandas(pdf)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
sorts = [True, False]
objs = [
([kdf, kdf], [pdf, pdf]),
([kdf, kdf.reset_index()], [pdf, pdf.reset_index()]),
([kdf.reset_index(), kdf], [pdf.reset_index(), pdf]),
([kdf, kdf[["C", "A"]]], [pdf, pdf[["C", "A"]]]),
([kdf[["C", "A"]], kdf], [pdf[["C", "A"]], pdf]),
([kdf, kdf["C"]], [pdf, pdf["C"]]),
([kdf["C"], kdf], [pdf["C"], pdf]),
([kdf["C"], kdf, kdf["A"]], [pdf["C"], pdf, pdf["A"]]),
([kdf, kdf["C"], kdf["A"]], [pdf, pdf["C"], pdf["A"]]),
]
for ignore_index, join, sort in itertools.product(ignore_indexes, joins, sorts):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(
ignore_index=ignore_index, join=join, sort=sort, pdfs=pdfs, pair=i
):
self.assert_eq(
ps.concat(kdfs, ignore_index=ignore_index, join=join, sort=sort),
pd.concat(pdfs, ignore_index=ignore_index, join=join, sort=sort),
almost=(join == "outer"),
)
self.assertRaisesRegex(TypeError, "first argument must be", lambda: ps.concat(kdf))
self.assertRaisesRegex(TypeError, "cannot concatenate object", lambda: ps.concat([kdf, 1]))
kdf2 = kdf.set_index("B", append=True)
self.assertRaisesRegex(
ValueError, "Index type and names should be same", lambda: ps.concat([kdf, kdf2])
)
self.assertRaisesRegex(ValueError, "No objects to concatenate", lambda: ps.concat([]))
self.assertRaisesRegex(ValueError, "All objects passed", lambda: ps.concat([None, None]))
pdf3 = pdf.copy()
kdf3 = kdf.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
        # TODO: columns.names = ["XYZ", "ABC"]
pdf3.columns = columns
kdf3.columns = columns
objs = [
([kdf3, kdf3], [pdf3, pdf3]),
([kdf3, kdf3.reset_index()], [pdf3, pdf3.reset_index()]),
([kdf3.reset_index(), kdf3], [pdf3.reset_index(), pdf3]),
([kdf3, kdf3[[("Y", "C"), ("X", "A")]]], [pdf3, pdf3[[("Y", "C"), ("X", "A")]]]),
([kdf3[[("Y", "C"), ("X", "A")]], kdf3], [pdf3[[("Y", "C"), ("X", "A")]], pdf3]),
]
for ignore_index, sort in itertools.product(ignore_indexes, sorts):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(
ignore_index=ignore_index, join="outer", sort=sort, pdfs=pdfs, pair=i
):
self.assert_eq(
ps.concat(kdfs, ignore_index=ignore_index, join="outer", sort=sort),
pd.concat(pdfs, ignore_index=ignore_index, join="outer", sort=sort),
)
# Skip tests for `join="inner" and sort=False` since pandas is flaky.
for ignore_index in ignore_indexes:
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(
ignore_index=ignore_index, join="inner", sort=True, pdfs=pdfs, pair=i
):
self.assert_eq(
ps.concat(kdfs, ignore_index=ignore_index, join="inner", sort=True),
pd.concat(pdfs, ignore_index=ignore_index, join="inner", sort=True),
)
self.assertRaisesRegex(
ValueError,
"MultiIndex columns should have the same levels",
lambda: ps.concat([kdf, kdf3]),
)
self.assertRaisesRegex(
ValueError,
"MultiIndex columns should have the same levels",
lambda: ps.concat([kdf3[("Y", "C")], kdf3]),
)
pdf4 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5], "C": [10, 20, 30]})
kdf4 = ps.from_pandas(pdf4)
self.assertRaisesRegex(
ValueError,
r"Only can inner \(intersect\) or outer \(union\) join the other axis.",
lambda: ps.concat([kdf, kdf4], join=""),
)
self.assertRaisesRegex(
ValueError,
r"Only can inner \(intersect\) or outer \(union\) join the other axis.",
lambda: ps.concat([kdf, kdf4], join="", axis=1),
)
self.assertRaisesRegex(
ValueError,
r"Only can inner \(intersect\) or outer \(union\) join the other axis.",
lambda: ps.concat([kdf.A, kdf4.B], join="", axis=1),
)
self.assertRaisesRegex(
ValueError,
r"Labels have to be unique; however, got duplicated labels \['A'\].",
lambda: ps.concat([kdf.A, kdf4.A], join="inner", axis=1),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ps.from_pandas(pdf1)
kdf2 = ps.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf1.A.rename("B")], [pdf1.A, pdf1.A.rename("B")]),
([kdf3[("X", "A")], kdf3[("X", "B")]], [pdf3[("X", "A")], pdf3[("X", "B")]],),
(
[kdf3[("X", "A")], kdf3[("X", "B")].rename("ABC")],
[pdf3[("X", "A")], pdf3[("X", "B")].rename("ABC")],
),
(
[kdf3[("X", "A")].rename("ABC"), kdf3[("X", "B")]],
[pdf3[("X", "A")].rename("ABC"), pdf3[("X", "B")]],
),
]
for ignore_index, join in itertools.product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ps.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
# test dataframes equality with broadcast hint.
def test_broadcast(self):
kdf = ps.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
self.assert_eq(kdf, ps.broadcast(kdf))
kdf.columns = ["x", "y"]
self.assert_eq(kdf, ps.broadcast(kdf))
kdf.columns = [("a", "c"), ("b", "d")]
self.assert_eq(kdf, ps.broadcast(kdf))
kser = ps.Series([1, 2, 3])
expected_error_message = "Invalid type : expected DataFrame got {}".format(
type(kser).__name__
)
with self.assertRaisesRegex(ValueError, expected_error_message):
ps.broadcast(kser)
def test_get_index_map(self):
kdf = ps.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
sdf = kdf.to_spark()
self.assertEqual(_get_index_map(sdf), (None, None))
def check(actual, expected):
actual_scols, actual_labels = actual
expected_column_names, expected_labels = expected
self.assertEqual(len(actual_scols), len(expected_column_names))
for actual_scol, expected_column_name in zip(actual_scols, expected_column_names):
expected_scol = sdf[expected_column_name]
self.assertTrue(actual_scol._jc.equals(expected_scol._jc))
self.assertEqual(actual_labels, expected_labels)
check(_get_index_map(sdf, "year"), (["year"], [("year",)]))
check(_get_index_map(sdf, ["year", "month"]), (["year", "month"], [("year",), ("month",)]))
self.assertRaises(KeyError, lambda: _get_index_map(sdf, ["year", "hour"]))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_namespace import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Nyker510/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
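# A minimal usage sketch for RBFSampler (illustrative only; the toy data is an
# assumption):
#
#   >>> import numpy as np
#   >>> from sklearn.kernel_approximation import RBFSampler
#   >>> X = np.random.RandomState(0).rand(10, 4)
#   >>> feature_map = RBFSampler(gamma=1., n_components=100, random_state=1)
#   >>> X_features = feature_map.fit_transform(X)
#   >>> X_features.shape
#   (10, 100)
#
# Dot products between rows of X_features approximate the RBF kernel
# exp(-gamma * ||x - y||^2) between the corresponding original samples.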
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Samples the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
    This estimator approximates a slightly different version of the additive
    chi squared kernel than ``sklearn.metrics.pairwise.additive_chi2_kernel``
    computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
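# --- Illustrative usage sketch (editor's addition, not part of the original
# module): with sample_steps=2 each input feature expands into
# 2*sample_steps - 1 = 3 output features, as in _transform_dense above.
# The toy data below is an arbitrary stand-in for histogram features.
def _additive_chi2_usage_example():
    from sklearn.linear_model import SGDClassifier
    rng = np.random.RandomState(0)
    X_demo = rng.rand(40, 8)                      # non-negative histogram-like data
    y_demo = (X_demo[:, 0] > .5).astype(int)
    sampler = AdditiveChi2Sampler(sample_steps=2)
    X_mapped = sampler.fit_transform(X_demo)      # shape (40, 8 * 3) = (40, 24)
    clf = SGDClassifier(random_state=0).fit(X_mapped, y_demo)
    return clf.score(X_mapped, y_demo)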
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
        Pseudo-inverse of the square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
        self.component_indices_ = basis_inds  # indices of the kept components
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
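# --- Illustrative usage sketch (editor's addition, not part of the original
# module): when n_components equals n_samples, the Nystroem map reproduces
# the exact kernel up to numerical error; smaller n_components trades
# accuracy for speed. The data and gamma value below are arbitrary.
def _nystroem_usage_example():
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X_demo = rng.rand(30, 4)
    nystroem = Nystroem(kernel="rbf", gamma=1.0, n_components=30, random_state=0)
    X_embedded = nystroem.fit_transform(X_demo)   # shape (30, 30)
    K_exact = rbf_kernel(X_demo, gamma=1.0)
    K_approx = np.dot(X_embedded, X_embedded.T)   # approximates K_exact
    return np.abs(K_exact - K_approx).max()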
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
holdenk/spark | python/pyspark/testing/sqlutils.py | 23 | 7740 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = str(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = str(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = str(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate UDT in Python only
"""
__UDT__ = PythonOnlyUDT() # type: ignore
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes the instance of this class has a 'spark' attribute holding a Spark
    session. It is usually used together with the 'ReusedSQLTestCase' class, but it can be
    used on its own as long as the implementation of the class provides a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
        databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
        functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
prashantas/MyDataScience | DeepNetwork/TransferLearning/transfer_learning_resnet50_custom_data.py | 1 | 5482 |
import numpy as np
import os
import time
from resnet50 import ResNet50
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
from imagenet_utils import preprocess_input
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
print (x.shape)
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print('Input image shape:', x.shape)
# Loading the training data
PATH = os.getcwd()
# Define data path
data_path = PATH + '/data'
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
img_list=os.listdir(data_path+'/'+ dataset)
print ('Loaded the images of dataset-'+'{}\n'.format(dataset))
for img in img_list:
img_path = data_path + '/'+ dataset + '/'+ img
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Define the number of classes
num_classes = 4
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:202]=0
labels[202:404]=1
labels[404:606]=2
labels[606:]=3
names = ['cats','dogs','horses','humans']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
###########################################################################################################################
# Custom_resnet_model_1
#Training the classifier alone
image_input = Input(shape=(224, 224, 3))
model = ResNet50(input_tensor=image_input, include_top=True,weights='imagenet')
model.summary()
last_layer = model.get_layer('avg_pool').output
x= Flatten(name='flatten')(last_layer)
out = Dense(num_classes, activation='softmax', name='output_layer')(x)
custom_resnet_model = Model(inputs=image_input,outputs= out)
custom_resnet_model.summary()
for layer in custom_resnet_model.layers[:-1]:
layer.trainable = False
custom_resnet_model.layers[-1].trainable = True
custom_resnet_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
t=time.time()
hist = custom_resnet_model.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_resnet_model.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
###########################################################################################################################
# Fine tune the resnet 50
#image_input = Input(shape=(224, 224, 3))
model = ResNet50(weights='imagenet',include_top=False)
model.summary()
last_layer = model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
x = Dense(512, activation='relu',name='fc-1')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu',name='fc-2')(x)
x = Dropout(0.5)(x)
# a softmax layer for 4 classes
out = Dense(num_classes, activation='softmax',name='output_layer')(x)
# this is the model we will train
custom_resnet_model2 = Model(inputs=model.input, outputs=out)
custom_resnet_model2.summary()
for layer in custom_resnet_model2.layers[:-6]:
layer.trainable = False
custom_resnet_model2.layers[-1].trainable = True
custom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
t=time.time()
hist = custom_resnet_model2.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
############################################################################################
import matplotlib.pyplot as plt
# visualizing losses and accuracy
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(12)
plt.figure(1,figsize=(7,5))
plt.plot(xc,train_loss)
plt.plot(xc,val_loss)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(xc,train_acc)
plt.plot(xc,val_acc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic']) | bsd-2-clause |
Tong-Chen/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 8 | 1784 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
pl.axis('tight')
pl.show()
| bsd-3-clause |
evgchz/scikit-learn | sklearn/covariance/graph_lasso_.py | 17 | 23130 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
max_iter=100, verbose=False, return_costs=False,
eps=np.finfo(np.float).eps, return_n_iter=False):
"""l1-penalized covariance estimator
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars')
coefs = coefs[:, -1]
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
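# --- Illustrative sketch (editor's addition, not part of the original
# module): recover a sparse precision matrix from simulated data with the
# function above. Dimensions and the alpha value are arbitrary choices.
def _graph_lasso_usage_example():
    from sklearn.datasets import make_sparse_spd_matrix
    rng = check_random_state(0)
    prec = make_sparse_spd_matrix(10, alpha=.9, random_state=rng)
    X = rng.multivariate_normal(np.zeros(10), linalg.inv(prec), size=200)
    emp_cov = empirical_covariance(X)
    # a larger alpha yields a sparser estimated precision matrix
    covariance, precision = graph_lasso(emp_cov, alpha=0.1)
    return covariance, precision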
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Parameters
----------
alpha : positive float, optional
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
        printed at each iteration.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
        Estimated precision matrix (pseudo-inverse of the covariance).
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray, shape (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False,
assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose,
return_n_iter=True)
return self
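# --- Illustrative sketch (editor's addition, not part of the original
# module): cross-validated choice of alpha on simulated data with a sparse
# precision matrix. The sizes and grid settings below are arbitrary.
def _graph_lasso_cv_usage_example():
    from sklearn.datasets import make_sparse_spd_matrix
    rng = check_random_state(1)
    prec = make_sparse_spd_matrix(5, alpha=.96, random_state=rng)
    X = rng.multivariate_normal(np.zeros(5), linalg.inv(prec), size=60)
    model = GraphLassoCV(alphas=4, n_refinements=4, cv=3).fit(X)
    # model.alpha_ is the selected penalty; model.precision_ the estimate
    return model.alpha_, model.cv_alphas_, model.precision_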
| bsd-3-clause |
spallavolu/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
tanayz/Kaggle | BCI/btb_gbm.py | 1 | 2694 | __author__ = 'tanay'
## author: phalaris
## kaggle bci challenge gbm benchmark
from __future__ import division
import numpy as np
import pandas as pd
import sklearn.ensemble as ens
train_subs = ['02','06','07','11','12','13','14','16','17','18','20','21','22','23','24','26']
test_subs = ['01','03','04','05','08','09','10','15','19','25']
train_labels = pd.read_csv('TrainLabels.csv')
submission = pd.read_csv('SampleSubmission.csv')
train = pd.DataFrame(columns=['subject','session','feedback_num','start_pos'] + ['Cz_' + s for s in map(str,range(261))],index=range(5440))
counter = 0
print 'loading train data'
data = {}
for i in train_subs:
for j in range(1,6):
temp = pd.read_csv('train/Data_S' + i + '_Sess0' + str(j) + '.csv')
fb = temp.query('FeedBackEvent == 1')['FeedBackEvent']
counter2 = 0
for k in fb.index:
temp2 = temp.loc[int(k):int(k)+260,'Cz']
temp2.index = ['Cz_' + s for s in map(str,range(261))]
train.loc[counter,['Cz_' + s for s in map(str,range(261))]] = temp2
train.loc[counter,'session'] = j
train.loc[counter, 'subject'] = i
train.loc[counter, 'feedback_num'] = counter2
train.loc[counter, 'start_pos'] = k
counter +=1
counter2 +=1
print 'subject ', i
train.to_csv('train_cz.csv',ignore_index=True)
test = pd.DataFrame(columns=['subject','session','feedback_num','start_pos'] + ['Cz_' + s for s in map(str,range(261))],index=range(3400))
print 'loading test data'
counter = 0
data = {}
for i in test_subs:
for j in range(1,6):
temp = pd.read_csv('test/Data_S' + i + '_Sess0' + str(j) + '.csv')
fb = temp.query('FeedBackEvent == 1')['FeedBackEvent']
counter2 = 0
for k in fb.index:
temp2 = temp.loc[int(k):int(k)+260,'Cz']
temp2.index = ['Cz_' + s for s in map(str,range(261))]
test.loc[counter,['Cz_' + s for s in map(str,range(261))]] = temp2
test.loc[counter,'session'] = j
test.loc[counter, 'subject'] = i
test.loc[counter, 'feedback_num'] = counter2
test.loc[counter, 'start_pos'] = k
counter +=1
counter2 +=1
print 'subject ', i
test.to_csv('test_cz.csv',ignore_index=True)
print 'training GBM'
gbm = ens.GradientBoostingClassifier(n_estimators=500,learning_rate=0.05, max_features=0.25)
gbm.fit(train, train_labels.values[:,1].ravel())
preds = gbm.predict_proba(test)
preds = preds[:,1]
submission['Prediction'] = preds
submission.to_csv('gbm_benchmark.csv',index=False)
print 'Done' | apache-2.0 |