repo_name (string, 6-103 chars) | path (string, 5-191 chars) | copies (string, 1-4 chars) | size (string, 4-6 chars) | content (string, 986-970k chars) | license (15 classes)
---|---|---|---|---|---|
feranick/SpectralMachine | Other/DNNClassifier-tf/dnnclassifier-tf.py | 1 | 3955 | import os
import six.moves.urllib.request as request
import tensorflow as tf
import numpy as np
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
# Check that we have correct TensorFlow version installed
tf_version = tf.__version__
print("TensorFlow version: {}".format(tf_version))
assert "1.4" <= tf_version, "TensorFlow r1.4 or later is needed"
PATH = ""
FILE_TRAIN = "AAAtest.txt"
#FILE_TRAIN = "Training_kerogen_633nm_HC_20170524a.txt"
#FILE_TRAIN = PATH+"AAA_ram_ex-unor_train.txt"
FILE_TEST = "AAAtest.txt"
tf.logging.set_verbosity(tf.logging.INFO)
def get_feature_names(learnFile):
try:
with open(learnFile, 'r') as f:
M = np.loadtxt(f, unpack=False)
feature_names = np.char.mod('%s',M[0,1:][0:])
return feature_names
except:
print('\033[1m' + ' Learning file not found \n' + '\033[0m')
return
feature_names = get_feature_names(FILE_TRAIN)
print(feature_names.shape)
arr = [[0]]
for i in range(len(feature_names)):
arr.append([0.])
print(arr)
def my_input_fn(learnFile, perform_shuffle=False, repeat_count=1):
def decode_csv(line):
#parsed_line = tf.decode_csv(line, [[i] for i in range(len(feature_names)+1)],field_delim='\t')
#parsed_line = tf.decode_csv(line, [[0], [0.] for i in range(len(feature_names)+1)],field_delim='\t')
#parsed_line = tf.decode_csv(line, [[0], [0.], [0.], [0.], [0.], [0.]],field_delim='\t')
parsed_line = tf.decode_csv(line, arr, field_delim='\t')
label = parsed_line[0]
label = tf.cast(label, tf.int32) # First element is the label
print("\n\nlabel\n",label,"\n\n\n")
#del parsed_line[0] # Delete last element
features = parsed_line[1:] # Everything after the first element are the features
print("\n\nfeatures\n",len(features),"\n\n",features,"\n\n")
d = dict(zip(feature_names, features)), label
return d
dataset = (tf.data.TextLineDataset(learnFile) # Read text file
.skip(1) # Skip header row
.map(decode_csv))
print(dataset)
if perform_shuffle:
# Randomizes input using a window of 256 elements (read into memory)
dataset = dataset.shuffle(buffer_size=256)
dataset = dataset.repeat(repeat_count) # Repeats dataset this # times
dataset = dataset.batch(32) # Batch size to use
iterator = dataset.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
print("\n\nbatch_features\n",len(batch_features),"\n\n",batch_features,"\n\n")
return batch_features, batch_labels
next_batch = my_input_fn(FILE_TRAIN, True) # Will return 32 random elements
print("\n\n",next_batch)
feature_columns = [tf.feature_column.numeric_column(k) for k in feature_names]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, # The input features to our model
hidden_units=[400, 200], # Two hidden layers, with 400 and 200 units
n_classes=24,
model_dir=PATH)
classifier.train(
input_fn=lambda: my_input_fn(FILE_TRAIN, True, 8))
# Evaluate our model using the examples contained in FILE_TEST
# Return value will contain evaluation_metrics such as: loss & average_loss
evaluate_result = classifier.evaluate(
input_fn=lambda: my_input_fn(FILE_TEST, False, 4))
print("Evaluation results")
for key in evaluate_result:
print(" {}, was: {}".format(key, evaluate_result[key]))
# Predict the class of the examples in FILE_TEST, repeating the dataset only once.
predict_results = classifier.predict(
input_fn=lambda: my_input_fn(FILE_TEST, False, 1))
print("Predictions on test file")
for prediction in predict_results:
# Print the predicted class id (an integer between 0 and 23 for the 24 classes).
print(prediction["class_ids"][0])
| gpl-3.0 |
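The script above feeds a tab-separated spectral file into a canned `tf.estimator.DNNClassifier` through a `tf.data` text pipeline. Below is a minimal, self-contained sketch of that same pattern, assuming TensorFlow 1.4+; the file name and feature names are hypothetical placeholders, not taken from the original script.

```python
# Minimal sketch of the tf.data -> DNNClassifier pattern used above.
# Assumes TensorFlow 1.x and a tab-separated file whose first column is an integer label.
import tensorflow as tf

FEATURES = ["f1", "f2", "f3"]                  # hypothetical feature names
DEFAULTS = [[0]] + [[0.0]] * len(FEATURES)     # label default + one float default per feature

def input_fn(path, shuffle=False, repeat=1):
    def parse(line):
        cols = tf.decode_csv(line, DEFAULTS, field_delim="\t")
        label = tf.cast(cols[0], tf.int32)         # first column is the label
        features = dict(zip(FEATURES, cols[1:]))   # remaining columns are the features
        return features, label

    ds = tf.data.TextLineDataset(path).skip(1).map(parse)
    if shuffle:
        ds = ds.shuffle(buffer_size=256)
    ds = ds.repeat(repeat).batch(32)
    return ds.make_one_shot_iterator().get_next()

columns = [tf.feature_column.numeric_column(k) for k in FEATURES]
clf = tf.estimator.DNNClassifier(feature_columns=columns,
                                 hidden_units=[400, 200], n_classes=24)
# clf.train(input_fn=lambda: input_fn("train.txt", shuffle=True, repeat=8))
```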
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_ll_l21.py | 1 | 1781 | import scipy.io
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.utility.sparse_learning import *
from skfeature.function.sparse_learning_based import ll_l21
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
Y = construct_label_matrix_pan(y)
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the feature weight matrix
Weight, obj, value_gamma = ll_l21.proximal_gradient_descent(X[train], Y[train], 0.1, verbose=False)
# rank the features according to the feature weight matrix
idx = feature_ranking(Weight)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print('Accuracy:', float(correct)/10)
if __name__ == '__main__':
main() | mit |
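The example above relies on the long-deprecated `sklearn.cross_validation` module. A sketch of the same 10-fold evaluation loop against the modern `sklearn.model_selection` API is shown below; it assumes the `skfeature` package and the `../data/COIL20.mat` file are available, exactly as in the original.

```python
# Sketch of the same 10-fold evaluation with sklearn.model_selection.KFold.
import scipy.io
import numpy as np
from sklearn.model_selection import KFold
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from skfeature.utility.sparse_learning import construct_label_matrix_pan, feature_ranking
from skfeature.function.sparse_learning_based import ll_l21

mat = scipy.io.loadmat('../data/COIL20.mat')
X, y = mat['X'].astype(float), mat['Y'][:, 0]
Y = construct_label_matrix_pan(y)

num_fea, accs = 100, []
for train, test in KFold(n_splits=10, shuffle=True).split(X):
    # obtain the feature weight matrix and rank the features
    W, _, _ = ll_l21.proximal_gradient_descent(X[train], Y[train], 0.1, verbose=False)
    idx = feature_ranking(W)
    X_sel = X[:, idx[:num_fea]]                       # keep the top-ranked features
    clf = LinearSVC().fit(X_sel[train], y[train])
    accs.append(accuracy_score(y[test], clf.predict(X_sel[test])))
print('Accuracy:', np.mean(accs))
```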
tdhopper/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a two-class dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a constant feature of ones, i.e. a column vector of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined folds, as folds generated internally would differ for the different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight dict is
# provided for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that liblinear fails when sample weights are provided
clf_lib = LR(solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y,
sample_weight=np.ones(y.shape[0]))
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilities using the true cross-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'sag' and multi_class == 'multinomial':
break
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
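The test module above exercises `LogisticRegression` across solvers and `multi_class` modes. As a quick standalone illustration of the behaviour checked in `test_logreg_predict_proba_multinomial`, the sketch below (an editorial addition, assuming a scikit-learn version of that era where `multi_class` is still a constructor argument) fits both variants and compares their training log-losses:

```python
# Standalone sketch mirroring test_logreg_predict_proba_multinomial (not part of the suite).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss

X, y = make_classification(n_samples=100, n_features=20, n_informative=10,
                           n_classes=3, random_state=0)

multi = LogisticRegression(multi_class="multinomial", solver="lbfgs").fit(X, y)
ovr = LogisticRegression(multi_class="ovr", solver="lbfgs").fit(X, y)

proba = multi.predict_proba(X)
assert np.allclose(proba.sum(axis=1), 1.0)   # each row is a probability distribution

# The true multinomial loss is expected to be lower on the training data.
print("multinomial log-loss:", log_loss(y, proba))
print("ovr log-loss:        ", log_loss(y, ovr.predict_proba(X)))
```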
mistercrunch/airflow | airflow/providers/google/cloud/operators/automl.py | 1 | 51678 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google AutoML operators."""
import ast
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.automl_v1beta1 import (
BatchPredictResult,
ColumnSpec,
Dataset,
Model,
PredictResponse,
TableSpec,
)
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class AutoMLTrainModelOperator(BaseOperator):
"""
Creates Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTrainModelOperator`
:param model: Model definition.
:type model: dict
:param project_id: ID of the Google Cloud project where the model will be created. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model: dict,
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model = model
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating model.")
operation = hook.create_model(
model=self.model,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = Model.to_dict(operation.result())
model_id = hook.extract_object_id(result)
self.log.info("Model created: %s", model_id)
self.xcom_push(context, key="model_id", value=model_id)
return result
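# --- Illustrative usage (editorial addition, not part of the original module) ---
# Inside a DAG one would typically instantiate the operator roughly as follows;
# the project, region, and model payload here are hypothetical placeholders.
#
# create_model = AutoMLTrainModelOperator(
#     task_id="create_model",
#     model={"display_name": "my_model", "dataset_id": "<dataset id>",
#            "tables_model_metadata": {"train_budget_milli_node_hours": 1000}},
#     location="us-central1",
#     project_id="my-gcp-project",
# )
#
# The created model's ID is pushed to XCom under the key "model_id" (see execute() above).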
class AutoMLPredictOperator(BaseOperator):
"""
Runs prediction operation on Google Cloud AutoML.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLPredictOperator`
:param model_id: Name of the model to use for the prediction.
:type model_id: str
:param payload: The payload of the prediction request, i.e. the instance to run the prediction on.
:type payload: dict
:param project_id: ID of the Google Cloud project where the model is located. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param operation_params: Additional domain-specific parameters for the predictions.
:type operation_params: Optional[Dict[str, str]]
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
payload: dict,
operation_params: Optional[Dict[str, str]] = None,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.operation_params = operation_params # type: ignore
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.payload = payload
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.predict(
model_id=self.model_id,
payload=self.payload,
location=self.location,
project_id=self.project_id,
params=self.operation_params,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return PredictResponse.to_dict(result)
class AutoMLBatchPredictOperator(BaseOperator):
"""
Perform a batch prediction on Google Cloud AutoML.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLBatchPredictOperator`
:param model_id: Name of the model requested to serve the batch prediction.
:type model_id: str
:param input_config: Required. The input configuration for batch prediction.
If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`
:type input_config: Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictInputConfig]
:param output_config: Required. The Configuration specifying where output predictions should be
written. If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`
:type output_config: Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig]
:param prediction_params: Additional domain-specific parameters for the predictions,
any string must be up to 25000 characters long.
:type prediction_params: Optional[Dict[str, str]]
:param project_id: ID of the Google Cloud project where the model is located. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model_id",
"input_config",
"output_config",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
input_config: dict,
output_config: dict,
location: str,
project_id: Optional[str] = None,
prediction_params: Optional[Dict[str, str]] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.prediction_params = prediction_params
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.input_config = input_config
self.output_config = output_config
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Fetch batch prediction.")
operation = hook.batch_predict(
model_id=self.model_id,
input_config=self.input_config,
output_config=self.output_config,
project_id=self.project_id,
location=self.location,
params=self.prediction_params,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = BatchPredictResult.to_dict(operation.result())
self.log.info("Batch prediction ready.")
return result
class AutoMLCreateDatasetOperator(BaseOperator):
"""
Creates a Google Cloud AutoML dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLCreateDatasetOperator`
:param dataset: The dataset to create. If a dict is provided, it must be of the
same form as the protobuf message Dataset.
:type dataset: Union[dict, Dataset]
:param project_id: ID of the Google Cloud project where the dataset will be created. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param params: Additional domain-specific parameters for the predictions.
:type params: Optional[Dict[str, str]]
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset: dict,
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset = dataset
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating dataset")
result = hook.create_dataset(
dataset=self.dataset,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = Dataset.to_dict(result)
dataset_id = hook.extract_object_id(result)
self.log.info("Creating completed. Dataset id: %s", dataset_id)
self.xcom_push(context, key="dataset_id", value=dataset_id)
return result
class AutoMLImportDataOperator(BaseOperator):
"""
Imports data to a Google Cloud AutoML dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLImportDataOperator`
:param dataset_id: ID of dataset to be updated.
:type dataset_id: str
:param input_config: The desired input location and its domain specific semantics, if any.
If a dict is provided, it must be of the same form as the protobuf message InputConfig.
:type input_config: dict
:param project_id: ID of the Google Cloud project where the dataset is located. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param params: Additional domain-specific parameters for the predictions.
:type params: Optional[Dict[str, str]]
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset_id",
"input_config",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_id: str,
location: str,
input_config: dict,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.input_config = input_config
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Importing dataset")
operation = hook.import_data(
dataset_id=self.dataset_id,
input_config=self.input_config,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
operation.result()
self.log.info("Import completed")
class AutoMLTablesListColumnSpecsOperator(BaseOperator):
"""
Lists column specs in a table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesListColumnSpecsOperator`
:param dataset_id: Name of the dataset.
:type dataset_id: str
:param table_spec_id: table_spec_id for path builder.
:type table_spec_id: str
:param field_mask: Mask specifying which fields to read. If a dict is provided, it must be of the same
form as the protobuf message `google.cloud.automl_v1beta1.types.FieldMask`
:type field_mask: Union[dict, google.cloud.automl_v1beta1.types.FieldMask]
:param filter_: Filter expression, see go/filtering.
:type filter_: str
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per page, this determines the maximum number
of resources in a page.
:type page_size: int
:param project_id: ID of the Google Cloud project where the dataset is located. If None,
the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_spec_id",
"field_mask",
"filter_",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_id: str,
table_spec_id: str,
location: str,
field_mask: Optional[dict] = None,
filter_: Optional[str] = None,
page_size: Optional[int] = None,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.table_spec_id = table_spec_id
self.field_mask = field_mask
self.filter_ = filter_
self.page_size = page_size
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting column specs.")
page_iterator = hook.list_column_specs(
dataset_id=self.dataset_id,
table_spec_id=self.table_spec_id,
field_mask=self.field_mask,
filter_=self.filter_,
page_size=self.page_size,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [ColumnSpec.to_dict(spec) for spec in page_iterator]
self.log.info("Columns specs obtained.")
return result
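# Illustrative usage sketch (added for clarity, not part of the upstream module):
# how this operator might be instantiated as a DAG task. The task_id, dataset,
# table, location and project values below are hypothetical placeholders.
def _example_list_column_specs_task():
    return AutoMLTablesListColumnSpecsOperator(
        task_id="list_column_specs",
        dataset_id="example_dataset_id",        # hypothetical
        table_spec_id="example_table_spec_id",  # hypothetical
        location="us-central1",                 # hypothetical
        project_id="example-project",           # hypothetical
    )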
class AutoMLTablesUpdateDatasetOperator(BaseOperator):
"""
Updates a dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesUpdateDatasetOperator`
:param dataset: The dataset which replaces the resource on the server.
If a dict is provided, it must be of the same form as the protobuf message Dataset.
:type dataset: Union[dict, Dataset]
:param update_mask: The update mask applies to the resource. If a dict is provided, it must
be of the same form as the protobuf message FieldMask.
:type update_mask: Union[dict, FieldMask]
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset",
"update_mask",
"location",
"impersonation_chain",
)
def __init__(
self,
*,
dataset: dict,
location: str,
update_mask: Optional[dict] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset = dataset
self.update_mask = update_mask
self.location = location
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating AutoML dataset %s.", self.dataset["name"])
result = hook.update_dataset(
dataset=self.dataset,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Dataset updated.")
return Dataset.to_dict(result)
class AutoMLGetModelOperator(BaseOperator):
"""
Get Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLGetModelOperator`
:param model_id: Name of the model requested to serve the prediction.
:type model_id: str
:param project_id: ID of the Google Cloud project where the model is located. If None,
    the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Model.to_dict(result)
class AutoMLDeleteModelOperator(BaseOperator):
"""
Delete Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeleteModelOperator`
:param model_id: Name of the model requested to serve the prediction.
:type model_id: str
:param project_id: ID of the Google Cloud project where the model is located. If None,
    the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.delete_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
operation.result()
class AutoMLDeployModelOperator(BaseOperator):
"""
Deploys a model. If a model is already deployed, deploying it again with the same
parameters has no effect. Deploying with different parameters (e.g. changing
node_number) resets the deployment state without pausing the model_id's availability.
Only applicable for Text Classification, Image Object Detection and Tables; all other
domains manage deployment automatically.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeployModelOperator`
:param model_id: Name of the model to be deployed.
:type model_id: str
:param image_detection_metadata: Model deployment metadata specific to Image Object Detection.
If a dict is provided, it must be of the same form as the protobuf message
ImageObjectDetectionModelDeploymentMetadata
:type image_detection_metadata: dict
:param project_id: ID of the Google Cloud project where the model is located. If None,
    the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: Optional[str] = None,
image_detection_metadata: Optional[dict] = None,
metadata: Sequence[Tuple[str, str]] = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.image_detection_metadata = image_detection_metadata
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deploying model_id %s", self.model_id)
operation = hook.deploy_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
image_detection_metadata=self.image_detection_metadata,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
operation.result()
self.log.info("Model deployed.")
class AutoMLTablesListTableSpecsOperator(BaseOperator):
"""
Lists table specs in a dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesListTableSpecsOperator`
:param dataset_id: Name of the dataset.
:type dataset_id: str
:param filter_: Filter expression, see go/filtering.
:type filter_: str
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:type page_size: int
:param project_id: ID of the Google Cloud project. If None, the default
    project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset_id",
"filter_",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_id: str,
location: str,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.filter_ = filter_
self.page_size = page_size
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting table specs for %s.", self.dataset_id)
page_iterator = hook.list_table_specs(
dataset_id=self.dataset_id,
filter_=self.filter_,
page_size=self.page_size,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [TableSpec.to_dict(spec) for spec in page_iterator]
self.log.info(result)
self.log.info("Table specs obtained.")
return result
class AutoMLListDatasetOperator(BaseOperator):
"""
Lists AutoML Datasets in project.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLListDatasetOperator`
:param project_id: ID of the Google Cloud project where the datasets are located. If None,
    the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting datasets")
page_iterator = hook.list_datasets(
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [Dataset.to_dict(dataset) for dataset in page_iterator]
self.log.info("Datasets obtained.")
self.xcom_push(
context,
key="dataset_id_list",
value=[hook.extract_object_id(d) for d in result],
)
return result
class AutoMLDeleteDatasetOperator(BaseOperator):
"""
Deletes a dataset and all of its contents.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeleteDatasetOperator`
:param dataset_id: Name of the dataset_id, a list of dataset_id values, or a
    comma-separated string of dataset_id values to be deleted.
:type dataset_id: Union[str, List[str]]
:param project_id: ID of the Google Cloud project where the dataset is located. If None,
    the default project_id is used.
:type project_id: str
:param location: The location of the project.
:type location: str
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"dataset_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_id: Union[str, List[str]],
location: str,
project_id: Optional[str] = None,
metadata: MetaData = (),
timeout: Optional[float] = None,
retry: Optional[Retry] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@staticmethod
def _parse_dataset_id(dataset_id: Union[str, List[str]]) -> List[str]:
if not isinstance(dataset_id, str):
return dataset_id
try:
return ast.literal_eval(dataset_id)
except (SyntaxError, ValueError):
return dataset_id.split(",")
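    # Illustrative note (added for clarity): the helper above accepts the three
    # supported input shapes, e.g.
    #     _parse_dataset_id(["id1", "id2"])    -> ["id1", "id2"]
    #     _parse_dataset_id("['id1', 'id2']")  -> ["id1", "id2"]   (literal_eval path)
    #     _parse_dataset_id("id1,id2")         -> ["id1", "id2"]   (comma-split fallback)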
def execute(self, context: 'Context'):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
dataset_id_list = self._parse_dataset_id(self.dataset_id)
for dataset_id in dataset_id_list:
self.log.info("Deleting dataset %s", dataset_id)
hook.delete_dataset(
dataset_id=dataset_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Dataset deleted.")
| apache-2.0 |
abimannans/scikit-learn | sklearn/neighbors/unsupervised.py | 22 | 4751 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/datasets/lfw.py | 140 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
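# Illustrative note (added for clarity, not part of the upstream module): the parser
# above expects tab-separated lines in the pairs index files, e.g.
#     "George_W_Bush\t10\t24"             -> 3 fields, same person  (target = 1)
#     "George_W_Bush\t12\tJohn_Kerry\t8"  -> 4 fields, different persons (target = 0)
# The 1-based picture indices from the file are converted to 0-based indices when
# the corresponding file paths are built.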
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
feranick/SpectralMachine | Other/experimental/new_TF_basic/SpectraLearnPredict_test-TF-new.py | 1 | 66279 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* SpectraLearnPredict
* Perform Machine Learning on Raman spectra.
* version: 20170626c-test-TF-new
*
* Uses: Deep Neural Networks, TensorFlow, SVM, PCA, K-Means
*
* By: Nicola Ferralis <feranick@hotmail.com>
*
***********************************************************
'''
print(__doc__)
import matplotlib
if matplotlib.get_backend() == 'TkAgg':
matplotlib.use('Agg')
import numpy as np
import sys, os.path, getopt, glob, csv
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
import random
#***************************************************************
''' Spectra normalization, preprocessing, model selection '''
#***************************************************************
class preprocDef:
Ynorm = True # Normalize spectra (True: recommended)
fullYnorm = True # Normalize considering full range (True: recommended)
StandardScalerFlag = False # Standardize features by removing the mean and scaling to unit variance (sklearn)
subsetCrossValid = False
percentCrossValid = 0.10 # proportion of TEST data for cross validation
YnormTo = 1
YnormX = 1600
YnormXdelta = 30
enRestrictRegion = False
enLim1 = 450 # for now use indexes rather than actual Energy
enLim2 = 550 # for now use indexes rather than actual Energy
scrambleNoiseFlag = False # Adds random noise to spectra (False: recommended)
scrambleNoiseOffset = 0.1
if StandardScalerFlag == True:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
#**********************************************
''' Calculation by limited number of points '''
#**********************************************
cherryPickEnPoint = False # False recommended
enSel = [1050, 1150, 1220, 1270, 1330, 1410, 1480, 1590, 1620, 1650]
enSelDelta = [2, 2, 2, 2, 10, 2, 2, 15, 5, 2]
#enSel = [1220, 1270, 1590]
#enSelDelta = [2, 2, 30]
if(cherryPickEnPoint == True):
enRestrictRegion = False
print(' Calculation by limited number of points: ENABLED ')
print(' THIS IS AN EXPERIMENTAL FEATURE \n')
print(' Restricted range: DISABLED')
#***********************************************************
''' Deep Neural Networks - tensorflow via DNNClassifier'''
#***********************************************************
class dnntfDef:
runDNNTF = False
alwaysRetrain = False
alwaysImprove = True
# Format: [number_neurons_HL1, number_neurons_HL2, number_neurons_HL3,...]
hidden_layers = [200]
# Stock Optimizers: Adagrad (recommended), Adam, Ftrl, Momentum, RMSProp, SGD
# https://www.tensorflow.org/api_guides/python/train
optimizer = "Adagrad"
# Additional optimizers: ProximalAdagrad, Adagrad-pro: (allow for parameters)
# https://www.tensorflow.org/api_guides/python/train
#optimizer = "ProximalAdagrad"
learning_rate=0.1
l2_reg_strength=1e-8
# activation functions: https://www.tensorflow.org/api_guides/python/nn
# relu, relu6, crelu, elu, softplus, softsign, dropout, bias_add
# sigmoid, tanh
activation_function = "tanh"
# When not None, the probability of dropout.
dropout_perc = None
trainingSteps = 1000 # number of training steps
valMonitorSecs = 10 # perform validation every given seconds
# threshold in % of probabilities for listing prediction results
thresholdProbabilityPred = 0.01
logCheckpoint = True
plotMap = True
#*************************************************
# Setup variables and definitions- do not change.
#*************************************************
if runDNNTF == True:
import tensorflow as tf
if activation_function == "sigmoid" or activation_function == "tanh":
actFn = "tf."+activation_function
else:
actFn = "tf.nn."+activation_function
activationFn = eval(actFn)
if optimizer == "ProximalAdagrad":
print(" DNNTF: Using ProximalAdagrad, learn_rate:",learning_rate,
", l2_reg_strength:", l2_reg_strength,"\n")
optimizer = tf.train.ProximalAdagradOptimizer(learning_rate=learning_rate,
l2_regularization_strength=l2_reg_strength,
name="ProximalAdagrad")
if optimizer == "Adagrad-pro":
print(" DNNTF: Using Adagrad, learn_rate:",learning_rate,"\n")
optimizer = tf.train.ProximalAdagradOptimizer(learning_rate=learning_rate,
name="Adagrad-pro")
#**********************************************
''' Deep Neural Networks - sklearn'''
#**********************************************
class nnDef:
runNN = False
alwaysRetrain = False
# Format: (number_neurons_HL1, number_neurons_HL2, number_neurons_HL3,)
hidden_layers = (200,) # default: 200
# Optimizers:
# - adam (default), for large datasets
# - lbfgs (default) for smaller datasets
optimizer = "adam"
# activation functions: http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
# identity, logistic (sigmoid), tanh, relu
activation_function = "tanh"
l2_reg_strength=1e-5
MLPRegressor = False
# threshold in % of probabilities for listing prediction results
thresholdProbabilityPred = 0.001
plotMap = True
nnClassReport = False
#**********************************************
''' Support Vector Machines'''
#**********************************************
class svmDef:
runSVM = False
alwaysRetrain = False
# threshold in % of probabilities for listing prediction results
thresholdProbabilitySVMPred = 3
''' Training algorithm for SVM
Use either 'linear' or 'rbf'
('rbf' for large number of features) '''
Cfactor = 20
kernel = 'rbf'
showClasses = False
plotMap = True
svmClassReport = False
#**********************************************
''' Principal component analysis (PCA) '''
#**********************************************
class pcaDef:
runPCA = False
customNumPCAComp = True
numPCAcomponents = 2
#**********************************************
''' K-means '''
#**********************************************
class kmDef:
runKM = False
customNumKMComp = False
numKMcomponents = 20
plotKM = False
plotMap = True
#**********************************************
''' TensorFlow '''
#**********************************************
class tfDef:
runTF = True
alwaysRetrain = True
alwaysImprove = True # alwaysRetrain must be True for this to work
# threshold in % of probabilities for listing prediction results
thresholdProbabilityTFPred = 30
decayLearnRate = True
learnRate = 0.75
subsetCrossValid = False
percentCrossValid = 0.05
logCheckpoint = False
plotMap = True
plotClassDistribTF = False
enableTensorboard = False
#**********************************************
''' Plotting '''
#**********************************************
class plotDef:
showProbPlot = False
showPCAPlots = True
createTrainingDataPlot = False
showTrainingDataPlot = False
plotAllSpectra = True # Set to false for extremely large training sets
if plotAllSpectra == False:
stepSpectraPlot = 100 # steps in the number of spectra to be plotted
#**********************************************
''' Multiprocessing '''
#**********************************************
multiproc = False
#**********************************************
''' Main '''
#**********************************************
def main():
try:
opts, args = getopt.getopt(sys.argv[1:],
"fambkph:", ["file", "accuracy", "map", "batch",
"kmaps", "pca", "help"])
except:
usage()
sys.exit(2)
if opts == []:
usage()
sys.exit(2)
print(" Using training file: ", sys.argv[2],"\n")
for o, a in opts:
if o in ("-f" , "--file"):
try:
LearnPredictFile(sys.argv[2], sys.argv[3])
except:
usage()
sys.exit(2)
if o in ("-a" , "--accuracy"):
print('\033[1m Running in cross validation mode for accuracy determination...\033[0m\n')
try:
if sys.argv[3]:
testFile = sys.argv[3]
except:
preprocDef.subsetCrossValid = True
testFile = "tmp"
#try:
trainAccuracy(sys.argv[2], testFile)
#except:
# usage()
# sys.exit(2)
if o in ("-m" , "--map"):
try:
LearnPredictMap(sys.argv[2], sys.argv[3])
except:
usage()
sys.exit(2)
if o in ("-b" , "--batch"):
try:
LearnPredictBatch(sys.argv[2])
except:
usage()
sys.exit(2)
if o in ("-p" , "--pca"):
if len(sys.argv) > 3:
numPCAcomp = int(sys.argv[3])
else:
numPCAcomp = pcaDef.numPCAcomponents
try:
runPCA(sys.argv[2], numPCAcomp)
except:
usage()
sys.exit(2)
if o in ("-k" , "--kmaps"):
if len(sys.argv) > 3:
numKMcomp = int(sys.argv[3])
else:
numKMcomp = kmDef.numKMcomponents
try:
KmMap(sys.argv[2], numKMcomp)
except:
usage()
sys.exit(2)
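# Illustrative command-line usage (added for clarity; the file names below are
# placeholders, not files shipped with this script):
#   python3 SpectraLearnPredict.py -f training_data.txt sample_spectrum.txt
#   python3 SpectraLearnPredict.py -a training_data.txt [test_data.txt]
#   python3 SpectraLearnPredict.py -m training_data.txt map_file.txt
#   python3 SpectraLearnPredict.py -b training_data.txt
#   python3 SpectraLearnPredict.py -p training_data.txt [num_PCA_components]
#   python3 SpectraLearnPredict.py -k training_data.txt [num_KM_components]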
#**********************************************
''' Learn and Predict - File'''
#**********************************************
def LearnPredictFile(learnFile, sampleFile):
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Run PCA '''
if pcaDef.runPCA == True:
runPCAmain(A, Cl, En)
''' Open prediction file '''
R, Rx = readPredFile(sampleFile)
''' Preprocess prediction data '''
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
''' Run Neural Network - TensorFlow'''
if dnntfDef.runDNNTF == True:
clf_dnntf, le_dnntf = trainDNNTF(A, Cl, A, Cl, learnFileRoot)
predDNNTF(clf_dnntf, le_dnntf, R, Cl)
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
clf_nn = trainNN(A, Cl, A, Cl, learnFileRoot)
predNN(clf_nn, A, Cl, R)
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
clf_svm = trainSVM(A, Cl, A, Cl, learnFileRoot)
predSVM(clf_svm, A, Cl, R)
''' Tensorflow '''
if tfDef.runTF == True:
trainTF(A, Cl, A, Cl, learnFileRoot)
predTF(A, Cl, R, learnFileRoot)
''' Plot Training Data '''
if plotDef.createTrainingDataPlot == True:
plotTrainData(A, En, R, plotDef.plotAllSpectra, learnFileRoot)
''' Run K-Means '''
if kmDef.runKM == True:
runKMmain(A, Cl, En, R, Aorig, Rorig)
#**********************************************
''' Train and accuracy'''
#**********************************************
def trainAccuracy(learnFile, testFile):
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
if preprocDef.subsetCrossValid == True:
print(" Cross-validation training using: ",str(preprocDef.percentCrossValid*100),
"% of training file as test subset\n")
A, Cl, A_test, Cl_test = formatSubset(A, Cl, preprocDef.percentCrossValid)
En_test = En
else:
print(" Cross-validation training using: privided test subset (",testFile,")\n")
En_test, Cl_test, A_test, YnormXind2 = readLearnFile(testFile)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Plot Training Data - Raw '''
if plotDef.createTrainingDataPlot == True:
plotTrainData(A, En, A_test, plotDef.plotAllSpectra, learnFileRoot+"_raw")
''' Preprocess prediction data '''
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
A_test, Cl_test, En_test, Aorig_test = preProcessNormLearningData(A_test, En_test, Cl_test, YnormXind, 0)
''' Run Neural Network - TensorFlow'''
if dnntfDef.runDNNTF == True:
clf_dnntf, le_dnntf = trainDNNTF(A, Cl, A_test, Cl_test, learnFileRoot)
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
clf_nn = trainNN(A, Cl, A_test, Cl_test, learnFileRoot)
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
clf_svm = trainSVM(A, Cl, A_test, Cl_test, learnFileRoot)
''' Tensorflow '''
if tfDef.runTF == True:
trainTF(A, Cl, A_test, Cl_test, learnFileRoot)
''' Plot Training Data - Normalized'''
if plotDef.createTrainingDataPlot == True:
plotTrainData(A, En, A_test, plotDef.plotAllSpectra, learnFileRoot+"_norm")
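# Illustrative sketch (added for clarity, not part of the original script): the
# formatSubset() call used above is defined elsewhere in this project; an
# equivalent hold-out split could also be obtained with scikit-learn, e.g.:
def _example_holdout_split(A, Cl, percent=0.10):
    from sklearn.model_selection import train_test_split
    # keep (1 - percent) of the spectra for training, percent for validation
    A_train, A_test, Cl_train, Cl_test = train_test_split(
        A, Cl, test_size=percent, random_state=42)
    return A_train, Cl_train, A_test, Cl_test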
#**********************************************
''' Process - Batch'''
#**********************************************
def LearnPredictBatch(learnFile):
summary_filename = 'summary' + str(datetime.now().strftime('_%Y-%m-%d_%H-%M-%S.csv'))
makeHeaderSummary(summary_filename, learnFile)
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, 0)
if multiproc == True:
import multiprocessing as mp
p = mp.Pool()
for f in glob.glob('*.txt'):
if (f != learnFile):
p.apply_async(processSingleBatch, args=(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile))
p.close()
p.join()
else:
for f in glob.glob('*.txt'):
if (f != learnFile):
processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile)
def processSingleBatch(f, En, Cl, A, Aorig, YnormXind, summary_filename, learnFile):
print(' Processing file: \033[1m' + f + '\033[0m\n')
R, Rx = readPredFile(f)
summaryFile = [f]
''' Preprocess prediction data '''
R, Rorig = preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, 0)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Run Neural Network - TensorFlow'''
if dnntfDef.runDNNTF == True:
clf_dnntf, le_dnntf = trainDNNTF(A, Cl, A, Cl, learnFileRoot)
dnntfPred, dnntfProb = predDNNTF(clf_dnntf, le_dnntf, R, Cl)
summaryFile.extend([dnntfPred, dnntfProb])
dnntfDef.alwaysRetrain = False
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
clf_nn = trainNN(A, Cl, A, Cl, learnFileRoot)
nnPred, nnProb = predNN(clf_nn, A, Cl, R)
summaryFile.extend([nnPred, nnProb])
nnDef.alwaysRetrain = False
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
clf_svm = trainSVM(A, Cl, A, Cl, learnFileRoot)
svmPred, svmProb = predSVM(clf_svm, A, Cl, En, R)
summaryFile.extend([svmPred, svmProb])
svmDef.alwaysRetrain = False
''' Tensorflow '''
if tfDef.runTF == True:
trainTF(A, Cl, A, Cl, learnFileRoot)
tfPred, tfProb = predTF(A, Cl, R, learnFileRoot)
summaryFile.extend([tfPred, tfProb])
tfDef.alwaysRetrain = False
''' Run K-Means '''
if kmDef.runKM == True:
kmDef.plotKM = False
kmPred = runKMmain(A, Cl, En, R, Aorig, Rorig)
summaryFile.extend([kmPred])
with open(summary_filename, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryFile)
sum_file.close()
#**********************************************
''' Learn and Predict - Maps'''
#**********************************************
def LearnPredictMap(learnFile, mapFile):
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
learnFileRoot = os.path.splitext(learnFile)[0]
''' Open prediction map '''
X, Y, R, Rx = readPredMap(mapFile)
type = 0
    i = 0
    svmPred, nnPred, dnntfPred, tfPred, kmPred = (np.empty([X.shape[0]]) for _ in range(5))
A, Cl, En, Aorig = preProcessNormLearningData(A, En, Cl, YnormXind, type)
print(' Processing map...' )
if nnDef.runNN == True:
clf_nn = trainNN(A, Cl, A, Cl, learnFileRoot)
    if dnntfDef.runDNNTF == True:
clf_dnntf, le_dnntf = trainDNNTF(A, Cl, A, Cl, learnFileRoot)
if svmDef.runSVM == True:
clf_svm = trainSVM(A, Cl, A, Cl, learnFileRoot)
for r in R[:]:
r, rorig = preProcessNormPredData(r, Rx, A, En, Cl, YnormXind, type)
type = 1
''' Run Neural Network - TensorFlow'''
        if dnntfDef.runDNNTF == True:
            dnntfPred[i], temp = predDNNTF(clf_dnntf, le_dnntf, r, Cl)
saveMap(mapFile, 'DNN-TF', 'HC', dnntfPred[i], X[i], Y[i], True)
            dnntfDef.alwaysRetrain = False
''' Run Neural Network - sklearn'''
if nnDef.runNN == True:
nnPred[i], temp = predNN(clf_nn, A, Cl, r)
saveMap(mapFile, 'NN', 'HC', nnPred[i], X[i], Y[i], True)
nnDef.alwaysRetrain = False
''' Run Support Vector Machines '''
if svmDef.runSVM == True:
svmPred[i], temp = predSVM(clf_svm, A, Cl, En, r)
saveMap(mapFile, 'svm', 'HC', svmPred[i], X[i], Y[i], True)
svmDef.alwaysRetrain = False
''' Tensorflow '''
if tfDef.runTF == True:
trainTF(A, Cl, A, Cl, learnFileRoot)
            tfPred[i], temp = predTF(A, Cl, r, learnFileRoot)
saveMap(mapFile, 'TF', 'HC', tfPred[i], X[i], Y[i], True)
tfDef.alwaysRetrain = False
''' Run K-Means '''
if kmDef.runKM == True:
kmDef.plotKM = False
kmPred[i] = runKMmain(A, Cl, En, r, Aorig, rorig)
saveMap(mapFile, 'KM', 'HC', kmPred[i], X[i], Y[i], True)
i+=1
if dnntfDef.plotMap == True and dnntfDef.runDNNTF == True:
plotMaps(X, Y, dnntfPred, 'Deep Neural networks - tensorFlow')
if nnDef.plotMap == True and nnDef.runNN == True:
plotMaps(X, Y, nnPred, 'Deep Neural networks - sklearn')
if svmDef.plotMap == True and svmDef.runSVM == True:
plotMaps(X, Y, svmPred, 'SVM')
if tfDef.plotMap == True and tfDef.runTF == True:
plotMaps(X, Y, tfPred, 'TensorFlow')
if kmDef.plotMap == True and kmDef.runKM == True:
plotMaps(X, Y, kmPred, 'K-Means Prediction')
#********************************************************************************
''' TensorFlow '''
''' Run SkFlow - DNN Classifier '''
''' https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier'''
#********************************************************************************
''' Train DNNClassifier model training via TensorFlow-skflow '''
#********************************************************************************
def trainDNNTF(A, Cl, A_test, Cl_test, Root):
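    # A, Cl: training spectra and class labels; A_test, Cl_test: test set used by the
    # validation monitor and the final evaluation; Root: base name for the model directory.
    # Returns the trained skflow DNNClassifier and the LabelEncoder used for the class labels.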
print('==========================================================================\n')
print('\033[1m Running Deep Neural Networks: DNNClassifier - TensorFlow...\033[0m')
print(' Hidden layers:', dnntfDef.hidden_layers)
print(' Optimizer:',dnntfDef.optimizer,', Activation function:',dnntfDef.activation_function,
', Dropout:', dnntfDef.dropout_perc)
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
if dnntfDef.logCheckpoint ==True:
tf.logging.set_verbosity(tf.logging.INFO)
if dnntfDef.alwaysRetrain == False:
model_directory = Root + "/DNN-TF_" + str(len(dnntfDef.hidden_layers))+"HL_"+str(dnntfDef.hidden_layers[0])
print("\n Training model saved in: ", model_directory, "\n")
else:
dnntfDef.alwaysImprove = True
model_directory = None
print("\n Training model not saved\n")
#**********************************************
''' Initialize Estimator and training data '''
#**********************************************
print(' Initializing TensorFlow...')
tf.reset_default_graph()
totA = np.vstack((A, A_test))
totCl = np.append(Cl, Cl_test)
numTotClasses = np.unique(totCl).size
le = preprocessing.LabelEncoder()
totCl2 = le.fit_transform(totCl)
Cl2 = le.transform(Cl)
Cl2_test = le.transform(Cl_test)
validation_monitor = skflow.monitors.ValidationMonitor(input_fn=lambda: input_fn(A_test, Cl2_test),
eval_steps=1,
every_n_steps=dnntfDef.valMonitorSecs)
feature_columns = skflow.infer_real_valued_columns_from_input(totA.astype(np.float32))
clf = skflow.DNNClassifier(feature_columns=feature_columns, hidden_units=dnntfDef.hidden_layers,
optimizer=dnntfDef.optimizer, n_classes=numTotClasses,
activation_fn=dnntfDef.activationFn, model_dir=model_directory,
config=skflow.RunConfig(save_checkpoints_secs=1),
dropout=dnntfDef.dropout_perc)
print("\n Number of global steps:",dnntfDef.trainingSteps)
#**********************************************
''' Train '''
#**********************************************
if dnntfDef.alwaysImprove == True or os.path.exists(model_directory) is False:
print(" (Re-)training using dataset: ", Root,"\n")
clf.fit(input_fn=lambda: input_fn(A, Cl2),
steps=dnntfDef.trainingSteps, monitors=[validation_monitor])
else:
print(" Retreaving training model from: ", model_directory,"\n")
accuracy_score = clf.evaluate(input_fn=lambda: input_fn(A_test, Cl2_test), steps=1)
print('\n ================================')
print(' \033[1mDNN-TF\033[0m - Accuracy')
print(' ================================')
print("\n Accuracy: {:.2f}%".format(100*accuracy_score["accuracy"]))
print(" Loss: {:.2f}".format(accuracy_score["loss"]))
print(" Global step: {:.2f}\n".format(accuracy_score["global_step"]))
print(' ================================\n')
return clf, le
#********************************************************************************
''' Predict using DNNClassifier model via TensorFlow-skflow '''
#********************************************************************************
def predDNNTF(clf, le, R, Cl):
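    # clf, le: trained DNNClassifier and LabelEncoder from trainDNNTF; R: preprocessed
    # sample spectrum; Cl: training labels (used only to list the candidate classes).
    # Returns the decoded predicted class and its probability in percent.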
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
#**********************************************
''' Predict '''
#**********************************************
def input_fn_predict():
x = tf.constant(R.astype(np.float32))
return x
pred_class = list(clf.predict_classes(input_fn=input_fn_predict))[0]
predValue = le.inverse_transform(pred_class)
prob = list(clf.predict_proba(input_fn=input_fn_predict))[0]
predProb = round(100*prob[pred_class],2)
rosterPred = np.where(prob>dnntfDef.thresholdProbabilityPred/100)[0]
print('\n ================================')
print(' \033[1mDNN-TF\033[0m - Probability >',str(dnntfDef.thresholdProbabilityPred),'%')
print(' ================================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*prob[rosterPred][i])))
print(' ================================')
    print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - TensorFlow) = ' + str(predValue) +
' (probability = ' + str(predProb) + '%)\033[0m\n')
return predValue, predProb
#**********************************************
''' Format input data for Estimator '''
#**********************************************
def input_fn(A, Cl2):
import tensorflow as tf
x = tf.constant(A.astype(np.float32))
y = tf.constant(Cl2)
return x,y
#********************************************************************************
''' MultiLayer Perceptron - SKlearn '''
''' http://scikit-learn.org/stable/modules/neural_networks_supervised.html'''
#********************************************************************************
''' Train Neural Network - sklearn '''
#********************************************************************************
def trainNN(A, Cl, A_test, Cl_test, Root):
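    # Trains (or reloads from a joblib pickle) a multi-layer perceptron, either a classifier
    # or a regressor depending on nnDef.MLPRegressor, and reports its accuracy (or R^2)
    # on the test set.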
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.externals import joblib
if nnDef.MLPRegressor is False:
Root+"/DNN-TF_"
nnTrainedData = Root + '.nnModelC.pkl'
else:
nnTrainedData = Root + '.nnModelR.pkl'
print('==========================================================================\n')
print('\033[1m Running Neural Network: multi-layer perceptron (MLP)\033[0m')
print(' Hidden layers with neuron count:', nnDef.hidden_layers)
print(' Optimizer:',nnDef.optimizer,', Activation Fn:',nnDef.activation_function,
', L2 reg. strength: ',nnDef.l2_reg_strength)
try:
if nnDef.alwaysRetrain == False:
with open(nnTrainedData):
print(' Opening NN training model...\n')
clf = joblib.load(nnTrainedData)
else:
raise ValueError(' Force NN retraining.')
except:
#**********************************************
''' Retrain training data if not available'''
#**********************************************
if nnDef.MLPRegressor is False:
print(' Retraining NN model using MLP Classifier...')
clf = MLPClassifier(solver=nnDef.optimizer, alpha=nnDef.l2_reg_strength,
activation = nnDef.activation_function,
hidden_layer_sizes=nnDef.hidden_layers, random_state=1)
else:
print(' Retraining NN model using MLP Regressor...')
clf = MLPRegressor(solver=nnDef.optimizer, alpha=nnDef.l2_reg_strength,
hidden_layer_sizes=nnDef.hidden_layers, random_state=1)
Cl = np.array(Cl,dtype=float)
clf.fit(A, Cl)
print(" Training on the full training dataset\n")
accur = clf.score(A_test,Cl_test)
if nnDef.MLPRegressor is False:
print(' Accuracy: ',100*accur,'%\n Loss: {:.5f}'.format(clf.loss_),'\n')
else:
print(' Coefficient of determination R^2: ',accur,
'\n Loss: {:.5f}'.format(clf.loss_),'\n')
joblib.dump(clf, nnTrainedData)
return clf
#********************************************************************************
''' Evaluate Neural Network - sklearn '''
#********************************************************************************
def predNN(clf, A, Cl, R):
if nnDef.MLPRegressor is False:
prob = clf.predict_proba(R)[0].tolist()
rosterPred = np.where(clf.predict_proba(R)[0]>nnDef.thresholdProbabilityPred/100)[0]
print('\n ==============================')
print(' \033[1mNN\033[0m - Probability >',str(nnDef.thresholdProbabilityPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.4f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
print(' ==============================')
predValue = clf.predict(R)[0]
predProb = round(100*max(prob),4)
print('\033[1m' + '\n Predicted classifier value (Deep Neural Networks - sklearn) = ' + str(predValue) +
' (probability = ' + str(predProb) + '%)\033[0m\n')
else:
Cl = np.array(Cl,dtype=float)
predValue = clf.predict(R)[0]
predProb = clf.score(A,Cl)
print('\033[1m' + '\n Predicted regressor value (Deep Neural Networks - sklearn) = ' + str('{:.3f}'.format(predValue)) +
' (R^2 = ' + str('{:.5f}'.format(predProb)) + ')\033[0m\n')
#**************************************
''' Neural Networks Classification Report '''
#**************************************
if nnDef.nnClassReport == True:
print(' Neural Networks Classification Report\n')
runClassReport(clf, A, Cl)
#*************************
''' Plot probabilities '''
#*************************
if plotDef.showProbPlot == True:
if nnDef.MLPRegressor is False:
plotProb(clf, R)
return predValue, predProb
#********************************************************************************
''' Support Vector Machines - SVM '''
''' http://scikit-learn.org/stable/modules/svm.html '''
#********************************************************************************
''' Train SVM '''
#********************************************************************************
def trainSVM(A, Cl, A_test, Cl_test, Root):
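    # Trains (or reloads from a joblib pickle) a probability-enabled SVC on the full
    # training set and reports its mean accuracy on the test set.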
from sklearn import svm
from sklearn.externals import joblib
svmTrainedData = Root + '.svmModel.pkl'
print('==========================================================================\n')
print('\033[1m Running Support Vector Machine (kernel: ' + svmDef.kernel + ')\033[0m')
try:
if svmDef.alwaysRetrain == False:
with open(svmTrainedData):
print(' Opening SVM training model...\n')
clf = joblib.load(svmTrainedData)
else:
raise ValueError(' Force retraining SVM model')
except:
#**********************************************
''' Retrain training model if not available'''
#**********************************************
print(' Retraining SVM data...')
clf = svm.SVC(C = svmDef.Cfactor, decision_function_shape = 'ovr', probability=True)
print(" Training on the full training dataset\n")
clf.fit(A,Cl)
accur = clf.score(A_test,Cl_test)
print(' Mean accuracy: ',100*accur,'%')
Z = clf.decision_function(A)
print('\n Number of classes = ' + str(Z.shape[1]))
joblib.dump(clf, svmTrainedData)
if svmDef.showClasses == True:
print(' List of classes: ' + str(clf.classes_))
print('\n==========================================================================\n')
return clf
#********************************************************************************
''' Predict using SVM '''
#********************************************************************************
def predSVM(clf, A, Cl, En, R):
R_pred = clf.predict(R)
prob = clf.predict_proba(R)[0].tolist()
rosterPred = np.where(clf.predict_proba(R)[0]>svmDef.thresholdProbabilitySVMPred/100)[0]
print('\n ==============================')
print(' \033[1mSVM\033[0m - Probability >',str(svmDef.thresholdProbabilitySVMPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(100*clf.predict_proba(R)[0][rosterPred][i])))
print(' ==============================')
print('\033[1m' + '\n Predicted value (SVM) = ' + str(R_pred[0]) + ' (probability = ' +
str(round(100*max(prob),1)) + '%)\033[0m\n')
#**************************************
''' SVM Classification Report '''
#**************************************
if svmDef.svmClassReport == True:
print(' SVM Classification Report \n')
runClassReport(clf, A, Cl)
#*************************
''' Plot probabilities '''
#*************************
if plotDef.showProbPlot == True:
plotProb(clf, R)
return R_pred[0], round(100*max(prob),1)
#********************************************************************************
''' Run PCA '''
''' Transform data:
pca.fit(data).transform(data)
Loading Vectors (eigenvectors):
pca.components_
Eigenvalues:
pca.explained_variance_ratio
'''
#********************************************************************************
def runPCA(learnFile, numPCAcomponents):
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib import cm
''' Open and process training data '''
En, Cl, A, YnormXind = readLearnFile(learnFile)
print('==========================================================================\n')
print(' Running PCA...\n')
print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
if pcaDef.customNumPCAComp == False:
numPCAcomp = np.unique(Cl).shape[0]
else:
numPCAcomp = numPCAcomponents
print(' Number of Principal components: ' + str(numPCAcomp) + '\n')
pca = PCA(n_components=numPCAcomp)
A_r = pca.fit(A).transform(A)
for i in range(0,pca.components_.shape[0]):
print(' Score PC ' + str(i) + ': ' + '{0:.0f}%'.format(pca.explained_variance_ratio_[i] * 100))
print('')
if plotDef.showPCAPlots == True:
print(' Plotting Loadings and score plots... \n')
#***************************
''' Plotting Loadings '''
#***************************
for i in range(0,pca.components_.shape[0]):
plt.plot(En, pca.components_[i,:], label='PC' + str(i) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[i] * 100))
plt.plot((En[0], En[En.shape[0]-1]), (0.0, 0.0), 'k--')
plt.title('Loadings plot')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Principal component')
plt.legend()
plt.figure()
#***************************
''' Plotting Scores '''
#***************************
Cl_ind = np.zeros(len(Cl))
Cl_labels = np.zeros(0)
ind = np.zeros(np.unique(Cl).shape[0])
for i in range(len(Cl)):
if (np.in1d(Cl[i], Cl_labels, invert=True)):
Cl_labels = np.append(Cl_labels, Cl[i])
for i in range(len(Cl)):
Cl_ind[i] = np.where(Cl_labels == Cl[i])[0][0]
colors = [ cm.jet(x) for x in np.linspace(0, 1, ind.shape[0]) ]
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter(A_r[Cl_ind==i,0], A_r[Cl_ind==i,1], color=color, alpha=.8, lw=2, label=target_name)
plt.title('Score plot')
plt.xlabel('PC 0 ({0:.0f}%)'.format(pca.explained_variance_ratio_[0] * 100))
plt.ylabel('PC 1 ({0:.0f}%)'.format(pca.explained_variance_ratio_[1] * 100))
plt.figure()
plt.title('Score box plot')
plt.xlabel('Principal Component')
plt.ylabel('Score')
for j in range(pca.components_.shape[0]):
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter([j+1]*len(A_r[Cl_ind==i,j]), A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
plt.boxplot(A_r)
plt.figure()
#******************************
''' Plotting Scores vs H:C '''
#******************************
for j in range(pca.components_.shape[0]):
for color, i, target_name in zip(colors, range(ind.shape[0]), Cl_labels):
plt.scatter(np.asarray(Cl)[Cl_ind==i], A_r[Cl_ind==i,j], color=color, alpha=.8, lw=2, label=target_name)
plt.xlabel('H:C elemental ratio')
plt.ylabel('PC ' + str(j) + ' ({0:.0f}%)'.format(pca.explained_variance_ratio_[j] * 100))
plt.figure()
plt.show()
#********************
''' Run K-Means '''
#********************
def runKMmain(A, Cl, En, R, Aorig, Rorig):
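    # Clusters the training spectra with K-Means (one cluster per unique class unless a
    # custom number is set), reports the cluster assigned to the sample spectrum R and
    # which training labels fall in that same cluster, and optionally plots its members.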
from sklearn.cluster import KMeans
print('==========================================================================\n')
print(' Running K-Means...')
print(' Number of unique identifiers in training data: ' + str(np.unique(Cl).shape[0]))
if kmDef.customNumKMComp == False:
numKMcomp = np.unique(Cl).shape[0]
else:
numKMcomp = kmDef.numKMcomponents
kmeans = KMeans(n_clusters=numKMcomp, random_state=0).fit(A)
'''
for i in range(0, numKMcomp):
print('\n Class: ' + str(i) + '\n ',end="")
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == i:
print(' ' + str(Cl[j]), end="")
'''
print('\n ==============================')
print(' \033[1mKM\033[0m - Predicted class: \033[1m',str(kmeans.predict(R)[0]),'\033[0m')
print(' ==============================')
print(' Prediction')
for j in range(0,kmeans.labels_.shape[0]):
        if kmeans.labels_[j] == kmeans.predict(R)[0]:
print(' ' + str(Cl[j]))
print(' ==============================\n')
if kmDef.plotKM == True:
import matplotlib.pyplot as plt
for j in range(0,kmeans.labels_.shape[0]):
if kmeans.labels_[j] == kmeans.predict(R)[0]:
plt.plot(En, Aorig[j,:])
plt.plot(En, Rorig[0,:], linewidth = 2, label='Predict')
plt.title('K-Means')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Intensity')
plt.legend()
plt.show()
return kmeans.predict(R)[0]
#**********************************************
''' K-Means - Maps'''
#**********************************************
def KmMap(mapFile, numKMcomp):
''' Open prediction map '''
X, Y, R, Rx = readPredMap(mapFile)
type = 0
i = 0;
R, Rx, Rorig = preProcessNormMap(R, Rx, type)
from sklearn.cluster import KMeans
print(' Running K-Means...')
print(' Number of classes: ' + str(numKMcomp))
kmeans = KMeans(n_clusters=kmDef.numKMcomponents, random_state=0).fit(R)
kmPred = np.empty([R.shape[0]])
for i in range(0, R.shape[0]):
kmPred[i] = kmeans.predict(R[i,:].reshape(1,-1))[0]
saveMap(mapFile, 'KM', 'Class', int(kmPred[i]), X[i], Y[i], True)
if kmPred[i] in kmeans.labels_:
if os.path.isfile(saveMapName(mapFile, 'KM', 'Class_'+ str(int(kmPred[i]))+'-'+str(np.unique(kmeans.labels_).shape[0]), False)) == False:
saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, Rx)), ' ', ' ', False)
            saveMap(mapFile, 'KM', 'Class_'+ str(int(kmPred[i])) + '-'+str(np.unique(kmeans.labels_).shape[0]) , '\t'.join(map(str, R[i,:])), X[i], Y[i], False)
if kmDef.plotKM == True:
plotMaps(X, Y, kmPred, 'K-Means')
#************************************
''' Read Learning file '''
#************************************
def readLearnFile(learnFile):
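    # Expected layout of the learning file: a whitespace-delimited matrix whose first row
    # holds the energy/Raman-shift axis (first cell is a placeholder) and whose remaining
    # rows hold one spectrum each, with the class label (e.g. H:C ratio) in the first column.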
try:
with open(learnFile, 'r') as f:
M = np.loadtxt(f, unpack =False)
except:
print('\033[1m' + ' Learning file not found \n' + '\033[0m')
return
En = np.delete(np.array(M[0,:]),np.s_[0:1],0)
M = np.delete(M,np.s_[0:1],0)
Cl = ['{:.2f}'.format(x) for x in M[:,0]]
A = np.delete(M,np.s_[0:1],1)
Atemp = A[:,range(len(preprocDef.enSel))]
if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
enPoints = list(preprocDef.enSel)
enRange = list(preprocDef.enSel)
for i in range(0, len(preprocDef.enSel)):
enRange[i] = np.where((En<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (En>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
for j in range(0, A.shape[0]):
Atemp[j,i] = A[j,A[j,enRange[i]].tolist().index(max(A[j, enRange[i]].tolist()))+enRange[i][0]]
enPoints[i] = int(np.average(enRange[i]))
A = Atemp
En = En[enPoints]
if type == 0:
            print( ' Cherry picking points in the spectra\n')
# Find index corresponding to energy value to be used for Y normalization
if preprocDef.fullYnorm == True:
YnormXind = np.where(En>0)[0].tolist()
else:
YnormXind_temp = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
if YnormXind_temp == []:
print( ' Renormalization region out of requested range. Normalizing over full range...\n')
YnormXind = np.where(En>0)[0].tolist()
else:
YnormXind = YnormXind_temp
print(' Number of datapoints = ' + str(A.shape[0]))
print(' Size of each datapoint = ' + str(A.shape[1]) + '\n')
return En, Cl, A, YnormXind
#**********************************************
''' Open prediction file '''
#**********************************************
def readPredFile(sampleFile):
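    # The sample file is loaded transposed: column 0 is the x-axis (Raman shift) and
    # column 1 the intensity. Returns the intensity vector R and its x-axis Rx.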
try:
with open(sampleFile, 'r') as f:
print(' Opening sample data for prediction...')
Rtot = np.loadtxt(f, unpack =True)
except:
print('\033[1m' + '\n Sample data file not found \n ' + '\033[0m')
return
R=Rtot[1,:]
Rx=Rtot[0,:]
if preprocDef.cherryPickEnPoint == True and preprocDef.enRestrictRegion == False:
Rtemp = R[range(len(preprocDef.enSel))]
enPoints = list(preprocDef.enSel)
enRange = list(preprocDef.enSel)
for i in range(0, len(preprocDef.enSel)):
enRange[i] = np.where((Rx<float(preprocDef.enSel[i]+preprocDef.enSelDelta[i])) & (Rx>float(preprocDef.enSel[i]-preprocDef.enSelDelta[i])))[0].tolist()
Rtemp[i] = R[R[enRange[i]].tolist().index(max(R[enRange[i]].tolist()))+enRange[i][0]]
enPoints[i] = int(np.average(enRange[i]))
R = Rtemp
Rx = Rx[enPoints]
return R, Rx
#**********************************************************************************
''' Preprocess Learning data '''
#**********************************************************************************
def preProcessNormLearningData(A, En, Cl, YnormXind, type):
print(' Processing dataset ... ')
#**********************************************************************************
''' Reformat x-axis in case it does not match that of the training data '''
#**********************************************************************************
if preprocDef.scrambleNoiseFlag == True:
print(' Adding random noise to training set \n')
scrambleNoise(A, preprocDef.scrambleNoiseOffset)
Aorig = np.copy(A)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
if preprocDef.fullYnorm == False:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
else:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
for i in range(0,A.shape[0]):
if(np.amin(A[i]) <= 0):
A[i,:] = A[i,:] - np.amin(A[i,:]) + 1e-8
A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/A[i,A[i][YnormXind].tolist().index(max(A[i][YnormXind].tolist()))+YnormXind[0]])
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
A = preprocDef.scaler.fit_transform(A)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
if(preprocDef.cherryPickEnPoint == True):
print( ' Using selected spectral points:')
print(En)
else:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return A, Cl, En, Aorig
#**********************************************************************************
''' Preprocess Prediction data '''
#**********************************************************************************
def preProcessNormPredData(R, Rx, A, En, Cl, YnormXind, type):
print(' Processing Prediction data file... ')
#**********************************************************************************
''' Reformat x-axis in case it does not match that of the training data '''
#**********************************************************************************
if(R.shape[0] != A.shape[1]):
if type == 0:
print('\033[1m' + ' WARNING: Different number of datapoints for the x-axis\n for training (' + str(A.shape[1]) + ') and sample (' + str(R.shape[0]) + ') data.\n Reformatting x-axis of sample data...\n' + '\033[0m')
R = np.interp(En, Rx, R)
R = R.reshape(1,-1)
Rorig = np.copy(R)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
if preprocDef.fullYnorm == False:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
else:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; to max intensity in spectra')
if(np.amin(R) <= 0):
            print(' Spectral intensities at or below zero detected')
R[0,:] = R[0,:] - np.amin(R[0,:]) + 1e-8
R[0,:] = np.multiply(R[0,:], preprocDef.YnormTo/R[0,R[0][YnormXind].tolist().index(max(R[0][YnormXind].tolist()))+YnormXind[0]])
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
R = preprocDef.scaler.transform(R)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
R = R[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
if(preprocDef.cherryPickEnPoint == True):
print( ' Using selected spectral points:')
print(En)
else:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return R, Rorig
#**********************************************************************************
''' Preprocess Map data '''
#**********************************************************************************
def preProcessNormMap(A, En, type):
#**********************************************************************************
''' Reformat x-axis in case it does not match that of the training data '''
#**********************************************************************************
# Find index corresponding to energy value to be used for Y normalization
if preprocDef.fullYnorm == False:
YnormXind = np.where((En<float(preprocDef.YnormX+preprocDef.YnormXdelta)) & (En>float(preprocDef.YnormX-preprocDef.YnormXdelta)))[0].tolist()
else:
YnormXind = np.where(En>0)[0].tolist()
Aorig = np.copy(A)
#**********************************************
''' Normalize/preprocess if flags are set '''
#**********************************************
if preprocDef.Ynorm == True:
if type == 0:
print(' Normalizing spectral intensity to: ' + str(preprocDef.YnormTo) + '; En = [' + str(preprocDef.YnormX-preprocDef.YnormXdelta) + ', ' + str(preprocDef.YnormX+preprocDef.YnormXdelta) + ']')
for i in range(0,A.shape[0]):
A[i,:] = np.multiply(A[i,:], preprocDef.YnormTo/np.amax(A[i]))
if preprocDef.StandardScalerFlag == True:
print(' Using StandardScaler from sklearn ')
A = preprocDef.scaler.fit_transform(A)
#**********************************************
''' Energy normalization range '''
#**********************************************
if preprocDef.enRestrictRegion == True:
A = A[:,range(preprocDef.enLim1, preprocDef.enLim2)]
En = En[range(preprocDef.enLim1, preprocDef.enLim2)]
Aorig = Aorig[:,range(preprocDef.enLim1, preprocDef.enLim2)]
if type == 0:
print( ' Restricting energy range between: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
else:
if type == 0:
print( ' Using full energy range: [' + str(En[0]) + ', ' + str(En[En.shape[0]-1]) + ']\n')
return A, En, Aorig
####################################################################
''' Format subset of training data '''
####################################################################
def formatSubset(A, Cl, percent):
from sklearn.model_selection import train_test_split
A_train, A_cv, Cl_train, Cl_cv = \
train_test_split(A, Cl, test_size=percent, random_state=42)
return A_train, Cl_train, A_cv, Cl_cv
####################################################################
''' Open map files '''
####################################################################
def readPredMap(mapFile):
try:
with open(mapFile, 'r') as f:
En = np.array(f.readline().split(), dtype=np.dtype(float))
A = np.loadtxt(f, unpack =False)
except:
print('\033[1m' + ' Map data file not found \n' + '\033[0m')
return
X = A[:,0]
Y = A[:,1]
A = np.delete(A, np.s_[0:2], 1)
print(' Shape map: ' + str(A.shape))
return X, Y, A, En
####################################################################
''' Save map files '''
####################################################################
def saveMap(file, type, extension, s, x1, y1, comma):
inputFile = saveMapName(file, type, extension, comma)
with open(inputFile, "a") as coord_file:
if comma==True:
coord_file.write('{:},'.format(x1))
coord_file.write('{:},'.format(y1))
else:
coord_file.write('{:}\t'.format(x1))
coord_file.write('{:}\t'.format(y1))
coord_file.write('{:}\n'.format(s))
coord_file.close()
def saveMapName(file, type, extension, comma):
if comma==True:
extension2 = '_map.csv'
else:
extension2 = '_map.txt'
return os.path.splitext(file)[0] + '_' + type + '-' + extension + extension2
#************************************
''' Plot Probabilities'''
#************************************
def plotProb(clf, R):
prob = clf.predict_proba(R)[0].tolist()
print(' Probabilities of this sample within each class: \n')
for i in range(0,clf.classes_.shape[0]):
print(' ' + str(clf.classes_[i]) + ': ' + str(round(100*prob[i],2)) + '%')
import matplotlib.pyplot as plt
print('\n Stand by: Plotting probabilities for each class... \n')
plt.title('Probability density per class')
for i in range(0, clf.classes_.shape[0]):
plt.scatter(clf.classes_[i], round(100*prob[i],2), label='probability', c = 'red')
plt.grid(True)
plt.xlabel('Class')
plt.ylabel('Probability [%]')
plt.show()
#************************************
''' Plot Training data'''
#************************************
def plotTrainData(A, En, R, plotAllSpectra, learnFileRoot):
import matplotlib.pyplot as plt
if plotDef.plotAllSpectra == True:
step = 1
learnFileRoot = learnFileRoot + '_full-set'
else:
step = plotDef.stepSpectraPlot
learnFileRoot = learnFileRoot + '_partial-' + str(step)
print(' Plotting Training dataset in: ' + learnFileRoot + '.png\n')
if preprocDef.Ynorm ==True:
plt.title('Normalized Training Data')
else:
plt.title('Training Data')
for i in range(0,A.shape[0], step):
plt.plot(En, A[i,:], label='Training data')
plt.plot(En, R[0,:], linewidth = 4, label='Sample data')
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Raman Intensity [arb. units]')
plt.savefig(learnFileRoot + '.png', dpi = 160, format = 'png') # Save plot
if plotDef.showTrainingDataPlot == True:
plt.show()
plt.close()
#************************************
''' Plot Processed Maps'''
#************************************
def plotMaps(X, Y, A, label):
print(' Plotting ' + label + ' Map...\n')
import scipy.interpolate
xi = np.linspace(min(X), max(X))
yi = np.linspace(min(Y), max(Y))
xi, yi = np.meshgrid(xi, yi)
rbf = scipy.interpolate.Rbf(Y, -X, A, function='linear')
zi = rbf(xi, yi)
import matplotlib.pyplot as plt
plt.imshow(zi, vmin=A.min(), vmax=A.max(), origin='lower',label='data',
extent=[X.min(), X.max(), Y.min(), Y.max()])
plt.title(label)
plt.xlabel('X [um]')
plt.ylabel('Y [um]')
plt.show()
####################################################################
''' Make header, if absent, for the summary file '''
####################################################################
def makeHeaderSummary(file, learnFile):
if os.path.isfile(file) == False:
summaryHeader1 = ['Training File:', learnFile]
summaryHeader2 = ['File','SVM-HC','SVM-Prob%', 'NN-HC', 'NN-Prob%', 'TF-HC', 'TF-Prob%', 'TF-Accuracy%']
with open(file, "a") as sum_file:
csv_out=csv.writer(sum_file)
csv_out.writerow(summaryHeader1)
csv_out.writerow(summaryHeader2)
sum_file.close()
#************************************
''' Lists the program usage '''
#************************************
def usage():
print('\n Usage:\n')
print(' Single files:')
print(' python3 SpectraLearnPredict.py -f <learningfile> <spectrafile>\n')
print(' Cross-validation for accuracy determination:')
print(' python3 SpectraLearnPredict.py -a <learningfile> <testdataset>\n')
print(' Cross-validation for accuracy determination (automatic splitting):')
print(' python3 SpectraLearnPredict.py -a <learningfile>\n')
print(' Maps (formatted for Horiba LabSpec):')
print(' python3 SpectraLearnPredict.py -m <learningfile> <spectramap>\n')
print(' Batch txt files:')
print(' python3 SpectraLearnPredict.py -b <learningfile>\n')
print(' K-means on maps:')
print(' python3 SpectraLearnPredict.py -k <spectramap> <number_of_classes>\n')
print(' Principal component analysis on spectral collection files: ')
print(' python3 SpectraLearnPredict.py -p <spectrafile> <#comp>\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
#************************************
''' Info on Classification Report '''
#************************************
def runClassReport(clf, A, Cl):
from sklearn.metrics import classification_report
y_pred = clf.predict(A)
print(classification_report(Cl, y_pred, target_names=clf.classes_))
    print(' Precision is the probability that, given a classification result for a sample,\n' +
          ' the sample actually belongs to that class. Recall is the probability that a \n' +
          ' sample of a given class will be correctly classified. The f1-score combines \n' +
          ' precision and recall into a single measure of the relevancy of the classifier results.\n')
#************************************
''' Introduce Noise in Data '''
#************************************
def scrambleNoise(A, offset):
from random import uniform
for i in range(A.shape[1]):
A[:,i] += offset*uniform(-1,1)
#********************************************************************************
''' TensorFlow '''
''' Basic Tensorflow '''
''' https://www.tensorflow.org/get_started/mnist/beginners'''
#********************************************************************************
''' Train basic TF training via TensorFlow- '''
#********************************************************************************
def trainTF(A, Cl, A_test, Cl_test, Root):
print('==========================================================================\n')
print('\033[1m Running Basic TensorFlow...\033[0m')
import time
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
if tfDef.logCheckpoint == True:
tf.logging.set_verbosity(tf.logging.INFO)
tfTrainedData = Root + '.tfmodel'
print("\n Training model saved in: ", tfTrainedData, "\n")
#**********************************************
''' Initialize Estimator and training data '''
#**********************************************
print(' Initializing TensorFlow...')
tf.reset_default_graph()
totA = np.vstack((A, A_test))
totCl = np.append(Cl, Cl_test)
numTotClasses = np.unique(totCl).size
le = preprocessing.LabelBinarizer()
totCl2 = le.fit_transform(totCl) # this is original from DNNTF
Cl2 = le.transform(Cl) # this is original from DNNTF
Cl2_test = le.transform(Cl_test)
#validation_monitor = skflow.monitors.ValidationMonitor(input_fn=lambda: input_fn(A_test, Cl2_test),
# eval_steps=1,
# every_n_steps=dnntfDef.valMonitorSecs)
#**********************************************
''' Construct TF model '''
#**********************************************
x,y,y_,z = setupTFmodel(A, totCl)
with tf.variable_scope('Loss'):
        losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(y_, tf.float32), logits=z)
loss_op = tf.reduce_mean(losses)
with tf.variable_scope('Accuracy'):
y_pred = tf.cast(z > 0, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y_pred, y_), tf.float32))
accuracy = tf.Print(accuracy, data=[accuracy], message="accuracy:")
# We add the training operation, ...
adam = tf.train.AdamOptimizer(1e-2)
train_op = adam.minimize(loss_op, name="train_op")
startTime = time.time()
with tf.Session() as sess:
# ... init our variables, ...
sess.run(tf.global_variables_initializer())
# ... check the accuracy before training, ...
print(" Run initial session...")
x_input, y_input = sess.run([tf.stack(A), tf.stack(Cl2)])
print(" Check initial accuracy...")
sess.run(accuracy, feed_dict={x: A_test, y_: Cl2_test})
# ... train ...
for i in range(10):
# ... by sampling some input data (fetching) ...
print(" Run session:",str(i))
x_input, y_input = sess.run([tf.stack(A), tf.stack(Cl2)])
print(x_input)
# ... and feeding it to our model
_, loss = sess.run([train_op, loss_op], feed_dict={x: x_input,y_: y_input})
print(" Loss:",str(loss))
sess.run(accuracy, feed_dict={x: A_test,y_: Cl2_test})
# We regularly check the loss
if i % 10 == 0:
print('iter:%d - loss:%f' % (i, loss))
# Finally, we check our final accuracy
x_input, y_input = sess.run([tf.stack(A), tf.stack(Cl2)])
sess.run(accuracy, feed_dict={x: A_test,y_: Cl2_test})
print("Time taken: %f" % (time.time() - startTime))
'''
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
if tfDef.decayLearnRate == True:
print(' Using decaying learning rate, start at:',tfDef.learnRate, '\n')
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = tfDef.learnRate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)
else:
print(' Using fix learning rate:', tfDef.learnRate, '\n')
train_step = tf.train.GradientDescentOptimizer(tfDef.learnRate).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
if tfDef.enableTensorboard == True:
writer = tf.summary.FileWriter(".", sess.graph)
print('\n Saving graph. Accessible via tensorboard. \n')
saver = tf.train.Saver()
accur = 0
#**********************************************
#**********************************************
try:
if tfDef.alwaysRetrain == False:
print(' Opening TF training model from:', tfTrainedData)
saver.restore(sess, './' + tfTrainedData)
print('\n Model restored.\n')
else:
raise ValueError(' Force TF model retraining.')
except:
init = tf.global_variables_initializer()
sess.run(init)
if os.path.isfile(tfTrainedData + '.meta') & tfDef.alwaysImprove == True:
print('\n Improving TF model...')
saver.restore(sess, './' + tfTrainedData)
else:
            print('\n Rebuilding TF model...')
print(' Performing training using subset (' + str(tfDef.percentCrossValid*100) + '%)')
summary = sess.run(train_step, feed_dict={x: A, y_: Cl2})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy_score = 100*accuracy.eval(feed_dict={x:A_test, y_:Cl2_test})
save_path = saver.save(sess, tfTrainedData)
print(' Model saved in file: %s\n' % save_path)
if tfDef.enableTensorboard == True:
writer.close()
sess.close()
print('\n ================================')
print(' \033[1mDNN-TF\033[0m - Accuracy')
print(' ================================')
print("\n Accuracy: {:.2f}%".format(accuracy_score))
#print(" Loss: {:.2f}".format(accuracy_score["loss"]))
#print(" Global step: {:.2f}\n".format(accuracy_score["global_step"]))
print(' ================================\n')
'''
#**********************************************
''' Predict using basic Tensorflow '''
#**********************************************
def predTF(A, Cl, R, Root):
print('==========================================================================\n')
print('\033[1m Running Basic TensorFlow Prediction...\033[0m')
import tensorflow as tf
import tensorflow.contrib.learn as skflow
from sklearn import preprocessing
if tfDef.logCheckpoint == True:
tf.logging.set_verbosity(tf.logging.INFO)
tfTrainedData = Root + '.tfmodel'
x,y,y_ = setupTFmodel(A, Cl)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
print(' Opening TF training model from:', tfTrainedData)
saver = tf.train.Saver()
saver.restore(sess, './' + tfTrainedData)
res1 = sess.run(y, feed_dict={x: R})
res2 = sess.run(tf.argmax(y, 1), feed_dict={x: R})
sess.close()
rosterPred = np.where(res1[0]>tfDef.thresholdProbabilityTFPred)[0]
print('\n ==============================')
print(' \033[1mTF\033[0m - Probability >',str(tfDef.thresholdProbabilityTFPred),'%')
print(' ==============================')
print(' Prediction\tProbability [%]')
for i in range(rosterPred.shape[0]):
print(' ',str(np.unique(Cl)[rosterPred][i]),'\t\t',str('{:.1f}'.format(res1[0][rosterPred][i])))
print(' ==============================\n')
print('\033[1m Predicted value (TF): ' + str(np.unique(Cl)[res2][0]) + ' (Probability: ' + str('{:.1f}'.format(res1[0][res2][0])) + '%)\n' + '\033[0m' )
return np.unique(Cl)[res2][0], res1[0][res2][0]
#**********************************************
''' Setup Tensorflow Model'''
#**********************************************
def setupTFmodel(A, Cl):
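    # Builds a minimal two-layer network: a ReLU hidden layer of width A.shape[1] followed
    # by a linear output layer with one logit per unique class. Returns the input placeholder
    # x, the hidden activations y, the one-hot label placeholder y_, and the output logits z.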
import tensorflow as tf
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, A.shape[1]])
y_ = tf.placeholder(tf.float32, [None, np.unique(Cl).shape[0]])
W = tf.Variable(tf.zeros([A.shape[1], A.shape[1]]),name="W1")
b = tf.Variable(tf.zeros(A.shape[1]),name="b")
# The raw formulation of cross-entropy can be numerically unstable
#y = tf.nn.softmax(tf.matmul(x, W) + b)
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=[1]))
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
z = tf.matmul(x,W) + b
y = tf.nn.relu(z)
W2 = tf.Variable(tf.zeros([A.shape[1], np.unique(Cl).shape[0]]),name="W2")
b2 = tf.Variable(tf.zeros(np.unique(Cl).shape[0]),name="b2")
z = tf.matmul(y, W2) + b2
return x, y, y_, z
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
google-research/dp-imagenet | benchmark/mnist_tf.py | 1 | 4616 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs MNIST training with differential privacy using Tensorflow."""
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow_privacy.privacy.keras_models.dp_keras_model import DPSequential
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, train with vanilla SGD.')
flags.DEFINE_float('learning_rate', .25, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 1.3, 'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.5, 'Clipping norm')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 15, 'Number of epochs')
flags.DEFINE_integer(
'microbatches', 250, 'Number of microbatches (must evenly divide batch_size)')
FLAGS = flags.FLAGS
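# Example invocation (values shown are the flag defaults defined above):
#   python mnist_tf.py --dpsgd=True --learning_rate=0.25 --noise_multiplier=1.3 \
#     --l2_norm_clip=1.5 --batch_size=250 --epochs=15 --microbatches=250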
def load_mnist():
MNIST_MEAN = 0.1307
MNIST_STD = 0.3081
"""Loads MNIST and preprocesses to combine training and validation data."""
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = (np.array(train_data, dtype=np.float32) / 255 - MNIST_MEAN) / MNIST_STD
test_data = (np.array(test_data, dtype=np.float32) / 255 - MNIST_MEAN) / MNIST_STD
train_data = train_data.reshape((train_data.shape[0], 28, 28, 1))
test_data = test_data.reshape((test_data.shape[0], 28, 28, 1))
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
return train_data, train_labels, test_data, test_labels
def main(unused_argv):
logging.set_verbosity(logging.ERROR)
if FLAGS.microbatches == -1:
FLAGS.microbatches = FLAGS.batch_size
if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
raise ValueError('Number of microbatches should divide evenly batch_size')
# Load training and test data.
train_data, train_labels, test_data, test_labels = load_mnist()
# Define a sequential Keras model
layers = [
tf.keras.layers.Conv2D(16, 8,
strides=2,
padding='same',
activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Conv2D(32, 4,
strides=2,
padding='valid',
activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(10)
]
if FLAGS.dpsgd:
model = DPSequential(
l2_norm_clip=FLAGS.l2_norm_clip,
noise_multiplier=FLAGS.noise_multiplier,
layers=layers)
else:
model = tf.keras.Sequential(layers=layers)
optimizer = tf.keras.optimizers.SGD(learning_rate=FLAGS.learning_rate)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Compile model with Keras
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
# Train model with Keras
epoch_time = []
for epoch in range(FLAGS.epochs):
start_time = time.time()
model.fit(train_data, train_labels,
epochs=1,
validation_data=None,
batch_size=FLAGS.batch_size)
epoch_time.append(time.time() - start_time)
print(f"Train Epoch: {epoch} \t took {epoch_time[-1]} seconds")
print('Average epoch time (all epochs): ', np.average(epoch_time))
print('Median epoch time (all epochs): ', np.median(epoch_time))
print('Average epoch time (except first): ', np.average(epoch_time[1:]))
print('Median epoch time (except first): ', np.median(epoch_time[1:]))
model.evaluate(test_data, test_labels, batch_size=FLAGS.batch_size)
if __name__ == '__main__':
app.run(main) | apache-2.0 |
tdhopper/scikit-learn | examples/classification/plot_digits_classification.py | 287 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
samzhang111/scikit-learn | benchmarks/bench_plot_lasso_path.py | 299 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.4/tutorials/design_concepts.py | 6 | 7730 | #!/usr/bin/env python
# coding: utf-8
# General Design Concepts of PHOEBE 2
# ======================
#
# This document aims to introduce the design concepts for PHOEBE 2 and explain **why** it is designed in a slightly non-pythonic way. Hopefully understanding the rationale for these decisions will help to make the learning curve a little less steep - but feel free to skip straight to the tutorials as all the concepts will be re-explained there within the context of PHOEBE.
#
# General Goals of PHOEBE 2
# ------------------------------------------
#
# Many eclipsing binary codes exist in the field, each with their own strengths and weaknesses. The major motivation for *re-writing* PHOEBE into PHOEBE 2 was to address the improvement in photometric precision in the Kepler-era. As such, we try to emphasize the following goals even when they come at the cost of others:
#
# * **Precision**: achieve a precision (both numerical and in terms of advanced physics) necessary to adequately model observations from Kepler and future space missions;
# * **Flexibility**: build a framework that is modular and allows for future expansion to include new physics and observables;
# * **Self-Consistency**: re-use the same frameworks and concepts within the code so that once the basics are mastered, more advanced functionality should be easy to learn;
# * **User-friendly**: replace the custom scripting language used in PHOEBE legacy with the widely-adopted Python scripting language.
#
# Whenever possible, without sacrificing the above goals, we also aim to achieve:
#
# * **Efficiency**: optimize the code to minimize computation time;
# * **Simplicity**: don't overcomplicate things and try to keep the learning-curve manageable.
# Why not use simple function calls?
# -----------------------------------------------
#
# Placing yourself in the shoes of a developer designing a code to model observables of eclipsing binary systems in Python, you would probably first consider the simplest option which is to have simple functions for each observable type to generate synthetic models.
#
# So for example, something like:
#
# ```
# fluxes = phoebe.lc(times=[...], teffA=6000, teffB=5500, massA=1.1, ...)
# rvs = phoebe.rv(times=[...], ...)
# ```
#
# Although this is easy to learn and very intuitive, it does have several significant drawbacks for our use-case:
# * Gets unwieldy if you want to pass a lot of (non-default) options
# * Using A and B suffixes (ie. `teffA` vs `teffB`) for two stars in a binary makes sense, but becomes much more complicated if we want to extend to triples, quadruples, etc.
# * Does not allow for any optimizations between observables/datasets. One of the more expensive steps in numerical modeling of EBs is creating the mesh of the star. Here if we want a light curve and radial velocity curve at the same time, we are unnecessarily duplicating that step.
# Ok, so why not objects with attributes and methods?
# ------------------------------------------------------
#
# Any Python developer considering these drawbacks above will immediately realize that classes/objects will solve them. We can now have a `Orbit` and `Star` class and instantiate one `Orbit` and two `Stars` (for a binary system).
#
# Imagine something like:
#
# ```
# primary = Star(teff=6000, mass=1.2)
# primary.requiv = 1.1
#
# secondary = Star(teff=5500, mass=0.9, requiv=0.9)
#
# orbit = Orbit(period=3.14, primary=primary, secondary=secondary)
# print(orbit.primary.mass)
# ```
#
# Now dealing with a lot of non-default options is a lot cleaner, as they can either be passed to the instantiation of the respective `Orbit` or `Star`, *or* we can change the attribute on the object later.
#
# A framework like this also adds a built-in object hierarchy. We no longer have `teffA` vs `teffB` but rather `orbit.primary.teff` vs `orbit.secondary.teff`, which will extend much nicer to higher-ordered systems.
#
# We could pass this `orbit` object to the functions above, but then we still have the optimization issue. So instead, we would also want to include our dataset information (passbands, etc.) so that we can compute all of our datasets at once. Maybe something like:
#
# ```
# orbit.add_lc(LC(times=[...], passband='Johnson:V', ...))
# orbit.add_rv(RV(times=[...], ...))
#
# fluxes, rvs = phoebe.create_synthetics(orbit)
# ```
#
# We've now addressed all of the drawbacks from the simple function design, but we have uncovered a few more:
# * Some parameters needed to define a dataset are still star-dependent - so where are these stored? For example, would we expect to access limb-darkening as `orbit.lcs[0].ld.primary`, `orbit.primary.lcs[0].ld`, or `orbit.lcs[0].primary.ld`?
# * Do we define a mass-ratio `q` in the `Orbit` class or individual masses in the `Star` class?
#
# The two points above have really driven the design of the PHOEBE 2 interface and add significant power and flexibility, but this has come at the cost of a somewhat non-pythonic interface and a steeper learning curve.
# Introducing the Bundle
# ----------------------------------------------
#
# ### a database-like store of parameters
#
# What we really need to address the first point above is a *database* of parameters, where each *parameter* is tagged with the *dataset* and *component/star* that it is associated with. This removes the fixed, ambiguous hierarchy of the parameters and allows the user to access them through any order of "drilling-down" (what we call *filtering* in PHOEBE 2).
#
# In pseudo-code, this will look something like:
#
# ```
# b = phoebe.default_binary()
# b.get_parameter(qualifier='teff', component='primary')
# b.get_value(qualifier='teff', component='primary')
# ```
#
# for the case of `teffA` vs `teffB` from before. Or for the case of limb-darkening:
#
# ```
# b.get_value(qualifier='ld', component='primary', dataset='mylc')
# ```
#
# The first tutorial on [general concepts](general_concepts.ipynb) discusses the actual implementation and syntax in detail.
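#
# As a rough, hedged sketch (the `mylc` dataset label and the exact tag names here are illustrative assumptions rather than guaranteed defaults), the same parameter can be reached by filtering in any order, or through a single "twig" string:
#
# ```
# b.filter(component='primary')                  # every parameter tagged with the primary star
# b.filter(dataset='mylc', component='primary')  # narrowed further by dataset
# b['teff@primary@component']                    # twig-style shortcut to a single parameter
# ```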
#
# ### support for custom parameterizations via "constraints"
#
# What we would really like to do for the second point (mass-ratio vs individual masses, semi-major axis and inclination vs asini, etc) is to have a framework that supports any *combination* of parameterizations by telling the parameters the equations that relate them to each other. These are what we call *constraints* in PHOEBE 2.
#
# In pseudo-code, this will look something like:
#
# ```
# b.get_value(qualifier='mass', component='primary')
# 1.2
# b.set_value(qualifier='q', component='binary', value=0.8)
# b.get_value(qualifier='mass', component='primary')
# 1.4
# ```
#
# Here the masses of the individual stars are *read-only* parameters which know how to be computed from `q` (and the orbital period and semi-major axis - following Kepler's third law). If you want to provide (or fit for) the primary mass instead of `q`, you can have PHOEBE re-arrange the equations to make `q` read-only instead. This looks something like:
#
# ```
# b.flip_constraint(qualifier='mass', component='primary', solve_for='q')
#
# b.set_value(qualifier='mass', component='primary', value=1.2)
# b.get_value(qualifier='q', component='binary')
# 1.2
# ```
#
# The tutorial on [constraints](constraints.ipynb) covers these concepts in detail. Although the advanced functionality can largely be ignored unless you want the flexibility to change the default parameterization, it is still important to be aware of constraints, how they work, and the fact that some parameters are "read-only" by default. However, once you learn how to use (and perhaps abuse) constraints, a large number of advanced use-cases open up.
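#
# As a minimal, hedged sketch (assuming, as in the pseudo-code above, that the relating equation is itself stored as a parameter tagged with `context='constraint'`), the constraint can be inspected like any other parameter:
#
# ```
# b.get_parameter(qualifier='mass', component='primary', context='constraint')
# # e.g. mass@primary ~ 4*pi**2 * sma**3 / (G * period**2 * (1 + q))   (Kepler's third law)
# ```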
#
# In[ ]:
| gpl-3.0 |
bthirion/scikit-learn | examples/cluster/plot_face_compress.py | 73 | 2198 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
Face, a 1024 x 768 size image of a raccoon face,
is used here to illustrate how `k`-means is
used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
try:  # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
n_clusters = 5
np.random.seed(0)
X = face.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
face_compressed = np.choose(labels, values)
face_compressed.shape = face.shape
vmin = face.min()
vmax = face.max()
# original face
plt.figure(1, figsize=(3, 2.2))
plt.imshow(face, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed face
plt.figure(2, figsize=(3, 2.2))
plt.imshow(face_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins face
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, face) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_face = np.choose(regular_labels.ravel(), regular_values, mode="clip")
regular_face.shape = face.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_face, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | examples/datasets/plot_iris_dataset.py | 281 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
Delphine-L/tools-iuc | tools/fsd/fsd.py | 12 | 44897 | #!/usr/bin/env python
# Family size distribution of SSCSs
#
# Author: Monika Heinzl, Johannes-Kepler University Linz (Austria)
# Contact: monika.heinzl@edumail.at
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS as input; up to 4 files can be provided.
# The program produces a plot which shows the distribution of family sizes of all SSCSs from the input files and
# a tabular file with the data of the plot, as well as a TXT file with all tags of the DCS and their family sizes.
# If only one file is provided, a family size distribution separated into SSCSs without a partner and DCSs is produced.
# If more than one file (up to 4) is given, a family size distribution with all datasets in one plot is produced.
# USAGE: python FSD_Galaxy_1.4_commandLine_FINAL.py --inputFile1 filename --inputName1 filename --inputFile2 filename2 --inputName2 filename2 --inputFile3 filename3 --inputName3 filename3 --inputFile4 filename4 --inputName4 filename4 --log_axis --output_tabular outptufile_name_tabular --output_pdf outptufile_name_pdf
import argparse
import sys
import matplotlib.pyplot as plt
import numpy
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file):
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype=str)
return(data_array)
def make_argparser():
parser = argparse.ArgumentParser(description='Family Size Distribution of duplex sequencing data')
parser.add_argument('--inputFile1', help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--inputFile2', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName2')
parser.add_argument('--inputFile3', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName3')
parser.add_argument('--inputFile4', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName4')
parser.add_argument('--log_axis', action="store_false", help='Transform y axis in log scale.')
parser.add_argument('--rel_freq', action="store_false", help='If False, the relative frequencies are displayed.')
parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the tabular file.')
return parser
def compare_read_families(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
firstFile = args.inputFile1
name1 = args.inputName1
secondFile = args.inputFile2
name2 = args.inputName2
thirdFile = args.inputFile3
name3 = args.inputName3
fourthFile = args.inputFile4
name4 = args.inputName4
log_axis = args.log_axis
rel_freq = args.rel_freq
title_file = args.output_tabular
title_file2 = args.output_pdf
sep = "\t"
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['patch.edgecolor'] = "black"
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
list_to_plot = []
label = []
data_array_list = []
list_to_plot_original = []
colors = []
bins = numpy.arange(1, 22)
with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
fig = plt.figure()
fig.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
fig2 = plt.figure()
fig2.subplots_adjust(left=0.12, right=0.97, bottom=0.23, top=0.94, hspace=0)
if firstFile is not None:
file1 = readFileReferenceFree(firstFile)
integers = numpy.array(file1[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers)
colors.append("#0000FF")
# for plot: replace all big family sizes by 22
data1 = numpy.clip(integers, bins[0], bins[-1])
name1 = name1.split(".tabular")[0]
if len(name1) > 40:
name1 = name1[:40]
list_to_plot.append(data1)
label.append(name1)
data_array_list.append(file1)
legend = "\n\n\n{}".format(name1)
fig.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.11, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "singletons:\nnr. of tags\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / len(data1))
fig.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.11, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "PE reads\n{:,} ({:.3f})".format(numpy.bincount(data1)[1],
float(numpy.bincount(data1)[1]) / sum(integers))
fig.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.11, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "family size > 20:\nnr. of tags\n{:,} ({:.3f})".format(len(integers[integers > 20]),
float(len(integers[integers > 20])) / len(integers))
fig.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.11, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n{:,} ({:.3f})".format(sum(integers[integers > 20]),
float(sum(integers[integers > 20])) / sum(integers))
fig.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.11, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "total nr. of\ntags\n{:,}".format(len(data1))
fig.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.11, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "PE reads\n{:,}".format(sum(integers))
fig.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.11, legend6b, size=10, transform=plt.gcf().transFigure)
if secondFile is not None:
file2 = readFileReferenceFree(secondFile)
integers2 = numpy.array(file2[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers2)
colors.append("#298A08")
data2 = numpy.clip(integers2, bins[0], bins[-1])
list_to_plot.append(data2)
name2 = name2.split(".tabular")[0]
if len(name2) > 40:
name2 = name2[:40]
label.append(name2)
data_array_list.append(file2)
fig.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.09, name2, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / len(data2))
fig.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.09, legend1, size=10, transform=plt.gcf().transFigure)
legend3 = "{:,} ({:.3f})".format(numpy.bincount(data2)[1], float(numpy.bincount(data2)[1]) / sum(integers2))
fig.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.09, legend3, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers2[integers2 > 20]),
float(len(integers2[integers2 > 20])) / len(integers2))
fig.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.09, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers2[integers2 > 20]),
float(sum(integers2[integers2 > 20])) / sum(integers2))
fig.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data2))
fig.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.09, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers2))
fig.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.09, legend6b, size=10, transform=plt.gcf().transFigure)
if thirdFile is not None:
file3 = readFileReferenceFree(thirdFile)
integers3 = numpy.array(file3[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers3)
colors.append("#DF0101")
data3 = numpy.clip(integers3, bins[0], bins[-1])
list_to_plot.append(data3)
name3 = name3.split(".tabular")[0]
if len(name3) > 40:
name3 = name3[:40]
label.append(name3)
data_array_list.append(file3)
fig.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.07, name3, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data3)[1], float(numpy.bincount(data3)[1]) / len(data3))
fig.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.07, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data3)[1],
float(numpy.bincount(data3)[1]) / sum(integers3))
fig.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.07, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers3[integers3 > 20]),
float(len(integers3[integers3 > 20])) / len(integers3))
fig.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.07, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers3[integers3 > 20]),
float(sum(integers3[integers3 > 20])) / sum(integers3))
fig.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.07, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data3))
fig.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.07, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers3))
fig.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.07, legend6b, size=10, transform=plt.gcf().transFigure)
if fourthFile is not None:
file4 = readFileReferenceFree(fourthFile)
integers4 = numpy.array(file4[:, 0]).astype(int) # keep original family sizes
list_to_plot_original.append(integers4)
colors.append("#04cec7")
data4 = numpy.clip(integers4, bins[0], bins[-1])
list_to_plot.append(data4)
name4 = name4.split(".tabular")[0]
if len(name4) > 40:
name4 = name4[:40]
label.append(name4)
data_array_list.append(file4)
fig.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.05, 0.05, name4, size=10, transform=plt.gcf().transFigure)
legend1 = "{:,} ({:.3f})".format(numpy.bincount(data4)[1], float(numpy.bincount(data4)[1]) / len(data4))
fig.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
fig2.text(0.32, 0.05, legend1, size=10, transform=plt.gcf().transFigure)
legend3b = "{:,} ({:.3f})".format(numpy.bincount(data4)[1],
float(numpy.bincount(data4)[1]) / sum(integers4))
fig.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.45, 0.05, legend3b, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,} ({:.3f})".format(len(integers4[integers4 > 20]),
float(len(integers4[integers4 > 20])) / len(integers4))
fig.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
fig2.text(0.58, 0.05, legend4, size=10, transform=plt.gcf().transFigure)
legend5 = "{:,} ({:.3f})".format(sum(integers4[integers4 > 20]),
float(sum(integers4[integers4 > 20])) / sum(integers4))
fig.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
fig2.text(0.70, 0.05, legend5, size=10, transform=plt.gcf().transFigure)
legend6 = "{:,}".format(len(data4))
fig.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
fig2.text(0.82, 0.05, legend6, size=10, transform=plt.gcf().transFigure)
legend6b = "{:,}".format(sum(integers4))
fig.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
fig2.text(0.89, 0.05, legend6b, size=10, transform=plt.gcf().transFigure)
list_to_plot2 = list_to_plot
if rel_freq:
ylab = "Relative Frequency"
else:
ylab = "Absolute Frequency"
# PLOT FSD based on tags
fig.suptitle('Family Size Distribution (FSD) based on families', fontsize=14)
ax = fig.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
ax.set_xticks([], [])
if rel_freq:
w = [numpy.zeros_like(data) + 1. / len(data) for data in list_to_plot2]
counts = ax.hist(list_to_plot2, weights=w, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", color=colors, linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8)
ax.set_ylim(0, 1.07)
else:
counts = ax.hist(list_to_plot2, bins=numpy.arange(1, 23), stacked=False, edgecolor="black", linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8, color=colors)
ax.set_xticks(numpy.array(ticks))
ax.set_xticklabels(ticks1)
ax.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
ax.set_ylabel(ylab, fontsize=14)
ax.set_xlabel("Family size", fontsize=14)
if log_axis:
ax.set_yscale('log')
ax.grid(b=True, which="major", color="#424242", linestyle=":")
ax.margins(0.01, None)
pdf.savefig(fig)
# PLOT FSD based on PE reads
fig2.suptitle('Family Size Distribution (FSD) based on PE reads', fontsize=14)
ax2 = fig2.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
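        # starting x-offset so that each dataset's group of bars is placed side by side around the family-size ticks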
barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
for i in range(len(list_to_plot2)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list_to_plot2[i], return_counts=True)
y = unique * c
if sum(list_to_plot_original[i] > 20) > 0:
y[len(y) - 1] = sum(list_to_plot_original[i][list_to_plot_original[i] > 20])
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(y))
if len(list_to_plot2) == 1:
x = [xi * 0.5 for xi in x]
w = 0.4
else:
x = [xi + barWidth for xi in x]
w = 1. / (len(list_to_plot) + 1)
if rel_freq:
ax2.bar(x, list(numpy.float_(y)) / numpy.sum(y), align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
ax2.set_ylim(0, 1.07)
else:
ax2.bar(x, y, align="edge", width=w, edgecolor="black", label=label[i], linewidth=1, alpha=0.7, color=colors[i])
if i == len(list_to_plot2) - 1:
barWidth += 1. / (len(list_to_plot) + 1) + 1. / (len(list_to_plot) + 1)
else:
barWidth += 1. / (len(list_to_plot) + 1)
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
if len(list_to_plot2) == 1:
ax2.set_xticks(numpy.array([xi + 0.2 for xi in x]))
else:
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
pdf.savefig(fig2)
plt.close()
# write data to CSV file tags
counts = [numpy.bincount(di, minlength=22)[1:] for di in list_to_plot2] # original counts of family sizes
output_file.write("Values from family size distribution with all datasets based on families\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(len(label)):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
# write data to CSV file PE reads
output_file.write("\n\nValues from family size distribution with all datasets based on PE reads\n")
output_file.write("\nFamily size")
for i in label:
output_file.write("{}{}".format(sep, i))
output_file.write("\n")
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
if len(label) == 1:
output_file.write("{}{}".format(int(reads[0][j]), sep))
else:
for n in range(len(label)):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("\n")
j += 1
output_file.write("sum{}".format(sep))
if len(label) == 1:
output_file.write("{}{}".format(int(sum(numpy.concatenate(reads))), sep))
else:
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("\n")
# Family size distribution after DCS and SSCS
for dataset, data_o, name_file in zip(list_to_plot, data_array_list, label):
tags = numpy.array(data_o[:, 2])
seq = numpy.array(data_o[:, 1])
data = numpy.array(dataset)
data_o = numpy.array(data_o[:, 0]).astype(int)
# find all unique tags and get the indices for ALL tags, but only once
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c > 1]
# get family sizes, tag for duplicates
duplTags_double = data[numpy.in1d(seq, d)]
duplTags_double_o = data_o[numpy.in1d(seq, d)]
duplTags = duplTags_double[0::2] # ab of DCS
duplTags_o = duplTags_double_o[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
duplTagsBA_o = duplTags_double_o[1::2] # ba of DCS
# get family sizes for SSCS with no partner
ab = numpy.where(tags == "ab")[0]
abSeq = seq[ab]
ab_o = data_o[ab]
ab = data[ab]
ba = numpy.where(tags == "ba")[0]
baSeq = seq[ba]
ba_o = data_o[ba]
ba = data[ba]
dataAB = ab[numpy.in1d(abSeq, d, invert=True)]
dataAB_o = ab_o[numpy.in1d(abSeq, d, invert=True)]
dataBA = ba[numpy.in1d(baSeq, d, invert=True)]
dataBA_o = ba_o[numpy.in1d(baSeq, d, invert=True)]
list1 = [duplTags_double, dataAB, dataBA] # list for plotting
list1_o = [duplTags_double_o, dataAB_o, dataBA_o] # list for plotting
# information for family size >= 3
dataAB_FS3 = dataAB[dataAB >= 3]
dataAB_FS3_o = dataAB_o[dataAB_o >= 3]
dataBA_FS3 = dataBA[dataBA >= 3]
dataBA_FS3_o = dataBA_o[dataBA_o >= 3]
duplTags_FS3 = duplTags[(duplTags >= 3) & (duplTagsBA >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA = duplTagsBA[(duplTags >= 3) & (duplTagsBA >= 3)] # ba+ab with FS>=3
duplTags_double_FS3 = len(duplTags_FS3) + len(duplTags_FS3_BA) # both ab and ba strands with FS>=3
# original FS
duplTags_FS3_o = duplTags_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ab+ba with FS>=3
duplTags_FS3_BA_o = duplTagsBA_o[(duplTags_o >= 3) & (duplTagsBA_o >= 3)] # ba+ab with FS>=3
duplTags_double_FS3_o = sum(duplTags_FS3_o) + sum(duplTags_FS3_BA_o) # both ab and ba strands with FS>=3
fig = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
if rel_freq:
w = [numpy.zeros_like(dj) + 1. / len(numpy.concatenate(list1)) for dj in list1]
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], weights=w, edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
plt.ylim(0, 1.07)
else:
plt.hist(list1, bins=numpy.arange(1, 23), stacked=True, label=["duplex", "ab", "ba"], edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"], rwidth=0.8)
# tick labels of x axis
ticks = numpy.arange(1, 22, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(numpy.array(ticks), ticks1)
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
if log_axis:
plt.yscale('log')
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.title("{}: FSD based on families".format(name_file), fontsize=14)
plt.xlabel("Family size", fontsize=14)
plt.ylabel(ylab, fontsize=14)
plt.margins(0.01, None)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig)
plt.close()
# PLOT FSD based on PE reads
fig3 = plt.figure()
plt.subplots_adjust(left=0.12, right=0.97, bottom=0.3, top=0.94, hspace=0)
fig3.suptitle("{}: FSD based on PE reads".format(name_file), fontsize=14)
ax2 = fig3.add_subplot(1, 1, 1)
ticks = numpy.arange(1, 22)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
reads = []
reads_rel = []
# barWidth = 0 - (len(list_to_plot) + 1) / 2 * 1. / (len(list_to_plot) + 1)
ax2.set_xticks([], [])
list_y = []
label = ["duplex", "ab", "ba"]
col = ["#FF0000", "#5FB404", "#FFBF00"]
for i in range(len(list1)):
x = list(numpy.arange(1, 22).astype(float))
unique, c = numpy.unique(list1[i], return_counts=True)
y = unique * c
if sum(list1_o[i] > 20) > 0:
y[len(y) - 1] = sum(list1_o[i][list1_o[i] > 20])
y = [y[x[idx] == unique][0] if x[idx] in unique else 0 for idx in range(len(x))]
reads.append(y)
reads_rel.append(list(numpy.float_(y)) / sum(numpy.concatenate(list1_o)))
if rel_freq:
y = list(numpy.float_(y)) / sum(numpy.concatenate(list1_o))
ax2.set_ylim(0, 1.07)
else:
y = y
list_y.append(y)
if i == 0:
ax2.bar(x, y, align="center", width=0.8, edgecolor="black", label=label[0], linewidth=1, alpha=1, color=col[0])
elif i == 1:
ax2.bar(x, y, bottom=list_y[i - 1], align="center", width=0.8, edgecolor="black", label=label[1], linewidth=1, alpha=1, color=col[1])
elif i == 2:
bars = numpy.add(list_y[0], list_y[1]).tolist()
ax2.bar(x, y, bottom=bars, align="center", width=0.8, edgecolor="black", label=label[2], linewidth=1, alpha=1, color=col[2])
ax2.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
singl = len(data_o[data_o == 1])
last = len(data_o[data_o > 20]) # large families
ax2.set_xticks(numpy.array(ticks))
ax2.set_xticklabels(ticks1)
ax2.set_xlabel("Family size", fontsize=14)
ax2.set_ylabel(ylab, fontsize=14)
if log_axis:
ax2.set_yscale('log')
ax2.grid(b=True, which="major", color="#424242", linestyle=":")
ax2.margins(0.01, None)
# extra information beneath the plot
legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \ntotal nr. of tags="
plt.text(0.1, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "nr. of tags\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(len(dataAB), len(dataBA),
len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)),
(len(ab) + len(ba)))
plt.text(0.23, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend5 = "PE reads\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,} ({:,})".format(sum(dataAB_o), sum(dataBA_o),
sum(duplTags_o), sum(duplTags_double_o),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(ab_o) + sum(ba_o)))
plt.text(0.38, 0.09, legend5, size=10, transform=plt.gcf().transFigure)
legend = "rel. freq. of tags\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)),
(len(dataAB) + len(dataBA) + len(duplTags)))
plt.text(0.54, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)),
float(len(dataBA)) / (len(ab) + len(ba)),
float(len(duplTags)) / (len(ab) + len(ba)),
float(len(duplTags_double)) / (len(ab) + len(ba)),
(len(ab) + len(ba)))
plt.text(0.64, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend1 = "\nsingletons:\nfamily size > 20:"
plt.text(0.1, 0.03, legend1, size=10, transform=plt.gcf().transFigure)
legend4 = "{:,}\n{:,}".format(singl, last)
plt.text(0.23, 0.03, legend4, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / len(data), float(last) / len(data))
plt.text(0.64, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "\n\n{:,}".format(sum(data_o[data_o > 20]))
plt.text(0.38, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend3 = "{:.3f}\n{:.3f}".format(float(singl) / sum(data_o), float(sum(data_o[data_o > 20])) / sum(data_o))
plt.text(0.84, 0.03, legend3, size=10, transform=plt.gcf().transFigure)
legend = "PE reads\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(
float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)))
plt.text(0.74, 0.09, legend, size=10, transform=plt.gcf().transFigure)
legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(
float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o)), (sum(ab_o) + sum(ba_o)))
plt.text(0.84, 0.09, legend, size=10, transform=plt.gcf().transFigure)
pdf.savefig(fig3)
plt.close()
# write same information to a csv file
count = numpy.bincount(data_o) # original counts of family sizes
output_file.write("\nDataset:{}{}\n".format(sep, name_file))
output_file.write("max. family size:{}{}\n".format(sep, max(data_o)))
output_file.write("absolute frequency:{}{}\n".format(sep, count[len(count) - 1]))
output_file.write("relative frequency:{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count)))
output_file.write("median family size:{}{}\n".format(sep, numpy.median(numpy.array(data_o))))
output_file.write("mean family size:{}{}\n\n".format(sep, numpy.mean(numpy.array(data_o))))
output_file.write(
"{}singletons:{}{}{}family size > 20:{}{}{}{}length of dataset:\n".format(sep, sep, sep, sep, sep, sep,
sep, sep))
output_file.write(
"{}nr. of tags{}rel. freq of tags{}rel.freq of PE reads{}nr. of tags{}rel. freq of tags{}nr. of PE reads{}rel. freq of PE reads{}total nr. of tags{}total nr. of PE reads\n".format(
sep, sep, sep, sep, sep, sep, sep, sep, sep))
output_file.write("{}{}{}{}{:.3f}{}{:.3f}{}{}{}{:.3f}{}{}{}{:.3f}{}{}{}{}\n\n".format(
name_file, sep, singl, sep, float(singl) / len(data), sep, float(singl) / sum(data_o), sep,
last, sep, float(last) / len(data), sep, sum(data_o[data_o > 20]), sep, float(sum(data_o[data_o > 20])) / sum(data_o), sep, len(data),
sep, sum(data_o)))
# information for FS >= 1
output_file.write(
"The unique frequencies were calculated from the dataset where the tags occured only once (=ab without DCS, ba without DCS)\n"
"Whereas the total frequencies were calculated from the whole dataset (=including the DCS).\n\n")
output_file.write(
"FS >= 1{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep, sep,
sep, sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB), sep, sum(dataAB_o), sep,
float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataAB)) / (len(ab) + len(ba)), sep, float(sum(dataAB_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataAB_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA), sep, sum(dataBA_o), sep,
float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)),
sep, float(len(dataBA)) / (len(ab) + len(ba)), sep,
float(sum(dataBA_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)),
sep, float(sum(dataBA_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags), len(duplTags_double), sep, sum(duplTags_o), sum(duplTags_double_o), sep,
float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep,
float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), sep,
float(sum(duplTags_o)) / (sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
float(sum(duplTags_o)) / (sum(ab_o) + sum(ba_o)),
float(sum(duplTags_double_o)) / (sum(ab_o) + sum(ba_o))))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep,
(len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(ab) + len(ba)), sep,
(sum(dataAB_o) + sum(dataBA_o) + sum(duplTags_o)), sep, (sum(ab_o) + sum(ba_o))))
# information for FS >= 3
output_file.write(
"\nFS >= 3{}nr. of tags{}nr. of PE reads{}rel. freq of tags{}{}rel. freq of PE reads:\n".format(sep,
sep,
sep,
sep,
sep))
output_file.write("{}{}{}unique:{}total{}unique{}total:\n".format(sep, sep, sep, sep, sep, sep))
output_file.write("SSCS ab{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataAB_FS3), sep, sum(dataAB_FS3_o), sep,
float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
float(len(dataAB_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(dataAB_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, float(sum(dataAB_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("SSCS ba{}{}{}{}{}{:.3f}{}{:.3f}{}{:.3f}{}{:.3f}\n".format(
sep, len(dataBA_FS3), sep, sum(dataBA_FS3_o), sep,
float(len(dataBA_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + len(duplTags_FS3)),
sep, float(len(dataBA_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(dataBA_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, float(sum(dataBA_FS3_o)) / (sum(dataBA_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write(
"DCS (total){}{} ({}){}{} ({}){}{:.3f}{}{:.3f} ({:.3f}){}{:.3f}{}{:.3f} ({:.3f})\n".format(
sep, len(duplTags_FS3), duplTags_double_FS3, sep, sum(duplTags_FS3_o), duplTags_double_FS3_o, sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
float(len(duplTags_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
float(duplTags_double_FS3) / (len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep,
float(sum(duplTags_FS3_o)) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o),
float(duplTags_double_FS3_o) / (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
output_file.write("total nr. of tags{}{}{}{}{}{}{}{}{}{}{}{}\n".format(
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)),
sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep,
(len(dataAB_FS3) + len(dataBA_FS3) + duplTags_double_FS3),
sep, (sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + sum(duplTags_FS3_o)), sep,
(sum(dataAB_FS3_o) + sum(dataBA_FS3_o) + duplTags_double_FS3_o)))
counts = [numpy.bincount(dk, minlength=22)[1:] for dk in list1] # original counts of family sizes
output_file.write("\nValues from family size distribution based on families\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(counts[n][j]), sep))
output_file.write("{}\n".format(counts[0][j] + counts[1][j] + counts[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in counts:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(counts[0] + counts[1] + counts[2])))
output_file.write("\nValues from family size distribution based on PE reads\n")
output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
j = 0
for fs in bins:
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}".format(fs, sep))
for n in range(3):
output_file.write("{}{}".format(int(reads[n][j]), sep))
output_file.write("{}\n".format(reads[0][j] + reads[1][j] + reads[2][j]))
j += 1
output_file.write("sum{}".format(sep))
for i in reads:
output_file.write("{}{}".format(int(sum(i)), sep))
output_file.write("{}\n".format(sum(reads[0] + reads[1] + reads[2])))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families(sys.argv))
| mit |
UltronAI/Deep-Learning | CS231n/assignment1/cs231n/data_utils.py | 2 | 8012 | from __future__ import print_function
from six.moves import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
import platform
def load_pickle(f):
version = platform.python_version_tuple()
if version[0] == '2':
return pickle.load(f)
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = load_pickle(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1,6):
f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
subtract_mean=True):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for classifiers. These are the same steps as we used for the SVM, but
condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
if subtract_mean:
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
# Package data into a dictionary
return {
'X_train': X_train, 'y_train': y_train,
'X_val': X_val, 'y_val': y_val,
'X_test': X_test, 'y_test': y_test,
}
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
"""
Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
TinyImageNet-200 have the same directory structure, so this can be used
to load any of them.
Inputs:
- path: String giving path to the directory to load.
- dtype: numpy datatype used to load the data.
- subtract_mean: Whether to subtract the mean training image.
Returns: A dictionary with the following entries:
- class_names: A list where class_names[i] is a list of strings giving the
WordNet names for class i in the loaded dataset.
- X_train: (N_tr, 3, 64, 64) array of training images
- y_train: (N_tr,) array of training labels
- X_val: (N_val, 3, 64, 64) array of validation images
- y_val: (N_val,) array of validation labels
- X_test: (N_test, 3, 64, 64) array of testing images.
- y_test: (N_test,) array of test labels; if test labels are not available
(such as in student code) then y_test will be None.
- mean_image: (3, 64, 64) array giving mean training image
"""
# First load wnids
with open(os.path.join(path, 'wnids.txt'), 'r') as f:
wnids = [x.strip() for x in f]
# Map wnids to integer labels
wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
# Use words.txt to get names for each class
with open(os.path.join(path, 'words.txt'), 'r') as f:
wnid_to_words = dict(line.split('\t') for line in f)
    for wnid, words in wnid_to_words.items():
wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
class_names = [wnid_to_words[wnid] for wnid in wnids]
# Next load training data.
X_train = []
y_train = []
for i, wnid in enumerate(wnids):
if (i + 1) % 20 == 0:
print('loading training data for synset %d / %d' % (i + 1, len(wnids)))
# To figure out the filenames we need to open the boxes file
boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
with open(boxes_file, 'r') as f:
filenames = [x.split('\t')[0] for x in f]
num_images = len(filenames)
X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)
for j, img_file in enumerate(filenames):
img_file = os.path.join(path, 'train', wnid, 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
## grayscale file
img.shape = (64, 64, 1)
X_train_block[j] = img.transpose(2, 0, 1)
X_train.append(X_train_block)
y_train.append(y_train_block)
# We need to concatenate all training data
X_train = np.concatenate(X_train, axis=0)
y_train = np.concatenate(y_train, axis=0)
# Next load validation data
with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
img_files = []
val_wnids = []
for line in f:
img_file, wnid = line.split('\t')[:2]
img_files.append(img_file)
val_wnids.append(wnid)
num_val = len(img_files)
y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'val', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_val[i] = img.transpose(2, 0, 1)
# Next load test images
# Students won't have test labels, so we need to iterate over files in the
# images directory.
img_files = os.listdir(os.path.join(path, 'test', 'images'))
X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'test', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_test[i] = img.transpose(2, 0, 1)
y_test = None
y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
if os.path.isfile(y_test_file):
with open(y_test_file, 'r') as f:
img_file_to_wnid = {}
for line in f:
line = line.split('\t')
img_file_to_wnid[line[0]] = line[1]
y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]
y_test = np.array(y_test)
mean_image = X_train.mean(axis=0)
if subtract_mean:
X_train -= mean_image[None]
X_val -= mean_image[None]
X_test -= mean_image[None]
return {
'class_names': class_names,
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val,
'X_test': X_test,
'y_test': y_test,
'mean_image': mean_image,
}
def load_models(models_dir):
"""
Load saved models from disk. This will attempt to unpickle all files in a
directory; any files that give errors on unpickling (such as README.txt) will
be skipped.
Inputs:
- models_dir: String giving the path to a directory containing model files.
Each model file is a pickled dictionary with a 'model' field.
Returns:
A dictionary mapping model file names to models.
"""
models = {}
for model_file in os.listdir(models_dir):
with open(os.path.join(models_dir, model_file), 'rb') as f:
try:
models[model_file] = load_pickle(f)['model']
except pickle.UnpicklingError:
continue
return models
| mit |
SysCV/tet | teter/models/roi_heads/track_heads/cem_head.py | 1 | 3356 | import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models import HEADS, build_loss
from teter.core import cal_similarity
@HEADS.register_module(force=True)
class ClsExemplarHead(nn.Module):
def __init__(
self,
num_convs=4,
num_fcs=1,
roi_feat_size=7,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
embed_channels=256,
conv_cfg=None,
norm_cfg=None,
softmax_temp=-1,
loss_track=dict(type="MultiPosCrossEntropyLoss", loss_weight=1),
):
super(ClsExemplarHead, self).__init__()
self.num_convs = num_convs
self.num_fcs = num_fcs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.embed_channels = embed_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.relu = nn.ReLU(inplace=True)
self.convs, self.fcs, last_layer_dim = self._add_conv_fc_branch(
self.num_convs, self.num_fcs, self.in_channels
)
self.fc_embed = nn.Linear(last_layer_dim, embed_channels)
self.softmax_temp = softmax_temp
self.loss_track = build_loss(loss_track)
def _add_conv_fc_branch(self, num_convs, num_fcs, in_channels):
last_layer_dim = in_channels
# add branch specific conv layers
convs = nn.ModuleList()
if num_convs > 0:
for i in range(num_convs):
conv_in_channels = last_layer_dim if i == 0 else self.conv_out_channels
convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
)
)
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
fcs = nn.ModuleList()
if num_fcs > 0:
last_layer_dim *= self.roi_feat_size * self.roi_feat_size
for i in range(num_fcs):
fc_in_channels = last_layer_dim if i == 0 else self.fc_out_channels
fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return convs, fcs, last_layer_dim
def init_weights(self):
for m in self.fcs:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.fc_embed.weight, 0, 0.01)
nn.init.constant_(self.fc_embed.bias, 0)
def forward(self, x):
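        # shared convs on the RoI feature map, flatten, fc layers with ReLU, then a linear projection to the embedding space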
if self.num_convs > 0:
for i, conv in enumerate(self.convs):
x = conv(x)
x = x.view(x.size(0), -1)
if self.num_fcs > 0:
for i, fc in enumerate(self.fcs):
x = self.relu(fc(x))
x = self.fc_embed(x)
return x
def sup_contra_loss(self, features, labels):
losses = dict()
loss_track = self.loss_track(features, labels)
losses["loss_cem"] = loss_track
return losses
| apache-2.0 |
arahuja/scikit-learn | examples/linear_model/plot_ard.py | 247 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
s40523141/2016fallcp_hw | plugin/liquid_tags/notebook.py | 26 | 10904 | """
Notebook Tag
------------
This is a liquid-style tag to include a static html rendering of an IPython
notebook in a blog post.
Syntax
------
{% notebook filename.ipynb [ cells[start:end] ]%}
The file should be specified relative to the ``notebooks`` subdirectory of the
content directory. Optionally, this subdirectory can be specified in the
config file:
NOTEBOOK_DIR = 'notebooks'
The cells[start:end] statement is optional, and can be used to specify which
block of cells from the notebook to include.
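For example (hypothetical filename), {% notebook my_analysis.ipynb cells[2:10] %}
would include only cells 2 through 9 of ``my_analysis.ipynb``.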
Requirements
------------
- The plugin requires IPython version 1.0 or above. It no longer supports the
standalone nbconvert package, which has been deprecated.
Details
-------
Because the notebook relies on some rather extensive custom CSS, the use of
this plugin requires additional CSS to be inserted into the blog theme.
After typing "make html" when using the notebook tag, a file called
``_nb_header.html`` will be produced in the main directory. The content
of the file should be included in the header of the theme. An easy way
to accomplish this is to add the following lines within the header template
of the theme you use:
{% if EXTRA_HEADER %}
{{ EXTRA_HEADER }}
{% endif %}
and in your ``pelicanconf.py`` file, include the line:
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
this will insert the appropriate CSS. All efforts have been made to ensure
that this CSS will not override formats within the blog theme, but there may
still be some conflicts.
"""
import re
import os
from functools import partial
from .mdx_liquid_tags import LiquidTags
import IPython
IPYTHON_VERSION = IPython.version_info[0]
try:
import nbformat
except:
pass
if not IPYTHON_VERSION >= 1:
raise ValueError("IPython version 1.0+ required for notebook tag")
try:
from nbconvert.filters.highlight import _pygments_highlight
except ImportError:
try:
from IPython.nbconvert.filters.highlight import _pygments_highlight
except ImportError:
# IPython < 2.0
from IPython.nbconvert.filters.highlight import _pygment_highlight as _pygments_highlight
from pygments.formatters import HtmlFormatter
try:
from nbconvert.exporters import HTMLExporter
except ImportError:
from IPython.nbconvert.exporters import HTMLExporter
try:
from traitlets.config import Config
except ImportError:
from IPython.config import Config
try:
from nbconvert.preprocessors import Preprocessor
except ImportError:
try:
from IPython.nbconvert.preprocessors import Preprocessor
except ImportError:
# IPython < 2.0
from IPython.nbconvert.transformers import Transformer as Preprocessor
try:
from traitlets import Integer
except ImportError:
from IPython.utils.traitlets import Integer
from copy import deepcopy
#----------------------------------------------------------------------
# Some code that will be added to the header:
# Some of the following javascript/css include is adapted from
# IPython/nbconvert/templates/fullhtml.tpl, while some are custom tags
# specifically designed to make the results look good within the
# pelican-octopress theme.
JS_INCLUDE = r"""
<style type="text/css">
/* Overrides of notebook CSS for static HTML export */
div.entry-content {
overflow: visible;
padding: 8px;
}
.input_area {
padding: 0.2em;
}
a.heading-anchor {
white-space: normal;
}
.rendered_html
code {
font-size: .8em;
}
pre.ipynb {
color: black;
background: #f7f7f7;
border: none;
box-shadow: none;
margin-bottom: 0;
padding: 0;
margin: 0px;
font-size: 13px;
}
/* remove the prompt div from text cells */
div.text_cell .prompt {
display: none;
}
/* remove horizontal padding from text cells, */
/* so it aligns with outer body text */
div.text_cell_render {
padding: 0.5em 0em;
}
img.anim_icon{padding:0; border:0; vertical-align:middle; -webkit-box-shadow:none; -box-shadow:none}
div.collapseheader {
    width: 100%;
background-color:#d3d3d3;
padding: 2px;
cursor: pointer;
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
}
</style>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML" type="text/javascript"></script>
<script type="text/javascript">
init_mathjax = function() {
if (window.MathJax) {
// MathJax loaded
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ]
},
displayAlign: 'left', // Change this to 'center' to center equations.
"HTML-CSS": {
styles: {'.MathJax_Display': {"margin": 0}}
}
});
MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
}
}
init_mathjax();
</script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript">
jQuery(document).ready(function($) {
$("div.collapseheader").click(function () {
$header = $(this).children("span").first();
$codearea = $(this).children(".input_area");
console.log($(this).children());
$codearea.slideToggle(500, function () {
$header.text(function () {
return $codearea.is(":visible") ? "Collapse Code" : "Expand Code";
});
});
});
});
</script>
"""
CSS_WRAPPER = """
<style type="text/css">
{0}
</style>
"""
#----------------------------------------------------------------------
# Create a custom preprocessor
class SliceIndex(Integer):
"""An integer trait that accepts None"""
default_value = None
def validate(self, obj, value):
if value is None:
return value
else:
return super(SliceIndex, self).validate(obj, value)
class SubCell(Preprocessor):
"""A transformer to select a slice of the cells of a notebook"""
start = SliceIndex(0, config=True,
help="first cell of notebook to be converted")
end = SliceIndex(None, config=True,
help="last cell of notebook to be converted")
def preprocess(self, nb, resources):
nbc = deepcopy(nb)
if IPYTHON_VERSION < 3:
for worksheet in nbc.worksheets:
cells = worksheet.cells[:]
worksheet.cells = cells[self.start:self.end]
else:
nbc.cells = nbc.cells[self.start:self.end]
return nbc, resources
call = preprocess # IPython < 2.0
#----------------------------------------------------------------------
# Custom highlighter:
# instead of using class='highlight', use class='highlight-ipynb'
def custom_highlighter(source, language='ipython', metadata=None):
formatter = HtmlFormatter(cssclass='highlight-ipynb')
if not language:
language = 'ipython'
output = _pygments_highlight(source, formatter, language)
return output.replace('<pre>', '<pre class="ipynb">')
#----------------------------------------------------------------------
# Below is the pelican plugin code.
#
SYNTAX = "{% notebook /path/to/notebook.ipynb [ cells[start:end] ] [ language[language] ] %}"
FORMAT = re.compile(r"""^(\s+)?(?P<src>\S+)(\s+)?((cells\[)(?P<start>-?[0-9]*):(?P<end>-?[0-9]*)(\]))?(\s+)?((language\[)(?P<language>-?[a-z0-9\+\-]*)(\]))?(\s+)?$""")
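# For example (hypothetical values), the markup "demo.ipynb cells[2:10] language[python]"
# parses into src='demo.ipynb', start='2', end='10', language='python'.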
@LiquidTags.register('notebook')
def notebook(preprocessor, tag, markup):
match = FORMAT.search(markup)
if match:
argdict = match.groupdict()
src = argdict['src']
start = argdict['start']
end = argdict['end']
language = argdict['language']
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
if start:
start = int(start)
else:
start = 0
if end:
end = int(end)
else:
end = None
language_applied_highlighter = partial(custom_highlighter, language=language)
nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
nb_path = os.path.join('content', nb_dir, src)
if not os.path.exists(nb_path):
raise ValueError("File {0} could not be found".format(nb_path))
# Create the custom notebook converter
c = Config({'CSSHTMLHeaderTransformer':
{'enabled':True, 'highlight_class':'.highlight-ipynb'},
'SubCell':
{'enabled':True, 'start':start, 'end':end}})
template_file = 'basic'
if IPYTHON_VERSION >= 3:
if os.path.exists('pelicanhtml_3.tpl'):
template_file = 'pelicanhtml_3'
elif IPYTHON_VERSION == 2:
if os.path.exists('pelicanhtml_2.tpl'):
template_file = 'pelicanhtml_2'
else:
if os.path.exists('pelicanhtml_1.tpl'):
template_file = 'pelicanhtml_1'
if IPYTHON_VERSION >= 2:
subcell_kwarg = dict(preprocessors=[SubCell])
else:
subcell_kwarg = dict(transformers=[SubCell])
exporter = HTMLExporter(config=c,
template_file=template_file,
filters={'highlight2html': language_applied_highlighter},
**subcell_kwarg)
# read and parse the notebook
with open(nb_path, encoding="utf-8") as f:
nb_text = f.read()
if IPYTHON_VERSION < 3:
nb_json = IPython.nbformat.current.reads_json(nb_text)
else:
try:
nb_json = nbformat.reads(nb_text, as_version=4)
except:
nb_json = IPython.nbformat.reads(nb_text, as_version=4)
(body, resources) = exporter.from_notebook_node(nb_json)
# if we haven't already saved the header, save it here.
if not notebook.header_saved:
print ("\n ** Writing styles to _nb_header.html: "
"this should be included in the theme. **\n")
header = '\n'.join(CSS_WRAPPER.format(css_line)
for css_line in resources['inlining']['css'])
header += JS_INCLUDE
with open('_nb_header.html', 'w', encoding="utf-8") as f:
f.write(header)
notebook.header_saved = True
# this will stash special characters so that they won't be transformed
# by subsequent processes.
body = preprocessor.configs.htmlStash.store(body, safe=True)
return body
notebook.header_saved = False
#----------------------------------------------------------------------
# This import allows notebook to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
tdhopper/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
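# Small toy dataset shared by many of the tests below.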
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
tdhopper/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 303 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example gives a visual comparison, in the feature space, of the results
of two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
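# rescale the estimated sources to unit variance so both decompositions plot on a comparable scale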
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | sklearn/datasets/california_housing.py | 197 | 3877 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupancy, latitude, and longitude, in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
| bsd-3-clause |
tdhopper/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 266 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points; this example uses 330 of
them. The model is trained on all 330 points, but only 30 are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# hide the labels of the points we will treat as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 270 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
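# keep only the grid points that lie inside the simplex (third coordinate non-negative)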
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot the modifications made by the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
EUDAT-B2SHARE/invenio | invenio/legacy/search_engine/query_parser.py | 19 | 56637 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2008, 2010, 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine query parsers."""
import re
import string
from datetime import datetime
from six import iteritems
from invenio.modules.indexer.tokenizers.BibIndexAuthorTokenizer import BibIndexAuthorTokenizer as FNT
from invenio.utils.date import GOT_DATEUTIL
if GOT_DATEUTIL:
from invenio.utils.date import du_parser, du_delta, relativedelta
from invenio.utils.logic import to_cnf
from invenio.config import CFG_WEBSEARCH_SPIRES_SYNTAX
from invenio.utils.date import strptime, strftime
NameScanner = FNT()
class InvenioWebSearchMismatchedParensError(Exception):
"""Exception for parse errors caused by mismatched parentheses."""
def __init__(self, message):
"""Initialization."""
self.message = message
def __str__(self):
"""String representation."""
return repr(self.message)
class SearchQueryParenthesisedParser(object):
"""Search query parser that handles arbitrarily-nested parentheses
Parameters:
* substitution_dict: a dictionary mapping strings to other strings. By
default, maps 'and', 'or' and 'not' to '+', '|', and '-'. Dictionary
values will be treated as valid operators for output.
A note (valkyrie 25.03.2011):
Based on looking through the prod search logs, it is evident that users,
when they are using parentheses to do searches, only run word characters
up against parens when they intend the parens to be part of the word (e.g.
U(1)), and when they are using parentheses to combine operators, they put
a space before and after them. As of writing, this is the behavior that
SQPP now expects, in order that it be able to handle such queries as
e(+)e(-) that contain operators in parentheses that should be interpreted
as words.
"""
def __init__(self, substitution_dict = {'and': '+', 'or': '|', 'not': '-'}):
self.substitution_dict = substitution_dict
self.specials = set(['(', ')', '+', '|', '-', '+ -'])
self.__tl_idx = 0
self.__tl_len = 0
# I think my names are both concise and clear
# pylint: disable=C0103
def _invenio_to_python_logical(self, q):
"""Translate the + and - in invenio query strings into & and ~."""
p = q
p = re.sub('\+ -', '&~', p)
p = re.sub('\+', '&', p)
p = re.sub('-', '~', p)
p = re.sub(' ~', ' & ~', p)
return p
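    # For example (illustrative), the symbolic form "p0 + p1 | p2 + - p3"
    # becomes "p0 & p1 | p2 &~ p3", which the CNF conversion in
    # logically_reduce() accepts.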
def _python_logical_to_invenio(self, q):
"""Translate the & and ~ in logical expression strings into + and -."""
p = q
p = re.sub('\& ~', '-', p)
p = re.sub('~', '-', p)
p = re.sub('\&', '+', p)
return p
# pylint: enable=C0103
def parse_query(self, query):
"""Make query into something suitable for search_engine.
This is the main entry point of the class.
Given an expression of the form:
"expr1 or expr2 (expr3 not (expr4 or expr5))"
        produces annotated list output suitable for consumption by search_engine,
of the form:
['+', 'expr1', '|', 'expr2', '+', 'expr3 - expr4 | expr5']
parse_query() is a wrapper for self.tokenize() and self.parse().
"""
toklist = self.tokenize(query)
depth, balanced, dummy_d0_p = self.nesting_depth_and_balance(toklist)
if not balanced:
raise SyntaxError("Mismatched parentheses in "+str(toklist))
toklist, var_subs = self.substitute_variables(toklist)
if depth > 1:
toklist = self.tokenize(self.logically_reduce(toklist))
return self.parse(toklist, var_subs)
def substitute_variables(self, toklist):
"""Given a token list, return a copy of token list in which all free
variables are bound with boolean variable names of the form 'pN'.
Additionally, all the substitutable logical operators are exchanged
for their symbolic form and implicit ands are made explicit
e.g., ((author:'ellis, j' and title:quark) or author:stevens jones)
becomes:
((p0 + p1) | p2 + p3)
with the substitution table:
{'p0': "author:'ellis, j'", 'p1': "title:quark",
'p2': "author:stevens", 'p3': "jones" }
Return value is the substituted token list and a copy of the
substitution table.
"""
def labels():
i = 0
while True:
yield 'p'+str(i)
i += 1
def filter_front_ands(toklist):
"""Filter out extra logical connectives and whitespace from the front."""
while toklist[0] == '+' or toklist[0] == '|' or toklist[0] == '':
toklist = toklist[1:]
return toklist
var_subs = {}
labeler = labels()
new_toklist = ['']
cannot_be_anded = self.specials.difference((')',))
for token in toklist:
token = token.lower()
if token in self.substitution_dict:
if token == 'not' and new_toklist[-1] == '+':
new_toklist[-1] = '-'
else:
new_toklist.append(self.substitution_dict[token])
elif token == '(':
if new_toklist[-1] not in self.specials:
new_toklist.append('+')
new_toklist.append(token)
elif token not in self.specials:
# apparently generators are hard for pylint to figure out
# Turns off msg about labeler not having a 'next' method
# pylint: disable=E1101
label = labeler.next()
# pylint: enable=E1101
var_subs[label] = token
if new_toklist[-1] not in cannot_be_anded:
new_toklist.append('+')
new_toklist.append(label)
else:
if token == '-' and new_toklist[-1] == '+':
new_toklist[-1] = '-'
else:
new_toklist.append(token)
return filter_front_ands(new_toklist), var_subs
def nesting_depth_and_balance(self, token_list):
"""Checks that parentheses are balanced and counts how deep they nest"""
depth = 0
maxdepth = 0
depth0_pairs = 0
good_depth = True
for i in range(len(token_list)):
token = token_list[i]
if token == '(':
if depth == 0:
depth0_pairs += 1
depth += 1
if depth > maxdepth:
maxdepth += 1
elif token == ')':
depth -= 1
if depth == -1: # can only happen with unmatched )
good_depth = False # so force depth check to fail
depth = 0 # but keep maxdepth in good range
return maxdepth, depth == 0 and good_depth, depth0_pairs
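    # e.g. (illustrative) ['(', 'a', '(', 'b', ')', ')'] -> (2, True, 1):
    # maximum nesting depth 2, parentheses balanced, one depth-0 pair.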
def logically_reduce(self, token_list):
"""Return token_list in conjunctive normal form as a string.
CNF has the property that there will only ever be one level of
parenthetical nesting, and all distributable operators (such as
the not in -(p | q) will be fully distributed (as -p + -q).
"""
maxdepth, dummy_balanced, d0_p = self.nesting_depth_and_balance(token_list)
s = ' '.join(token_list)
s = self._invenio_to_python_logical(s)
last_maxdepth = 0
while maxdepth != last_maxdepth: # XXX: sometimes NaryExpr doesn't
try: # fully flatten Expr; but it usually
s = str(to_cnf(s)) # does in 2 passes FIXME: diagnose
except SyntaxError:
raise SyntaxError(str(s)+" couldn't be converted to a logic expression.")
last_maxdepth = maxdepth
maxdepth, dummy_balanced, d0_p = self.nesting_depth_and_balance(self.tokenize(s))
if d0_p == 1 and s[0] == '(' and s[-1] == ')': # s can come back with extra parens
s = s[1:-1]
s = self._python_logical_to_invenio(s)
return s
def tokenize(self, query):
"""Given a query string, return a list of tokens from that string.
* Isolates meaningful punctuation: ( ) + | -
* Keeps single- and double-quoted strings together without interpretation.
* Splits everything else on whitespace.
i.e.:
"expr1|expr2 (expr3-(expr4 or expr5))"
becomes:
['expr1', '|', 'expr2', '(', 'expr3', '-', '(', 'expr4', 'or', 'expr5', ')', ')']
special case:
"e(+)e(-)" interprets '+' and '-' as word characters, since they are in parens with
word characters running up against them.
it becomes:
['e(+)e(-)']
"""
###
# Invariants:
# * Query is never modified
# * In every loop iteration, querytokens grows to the right
# * The only return point is at the bottom of the function, and the only
# return value is querytokens
###
def get_tokens(s):
"""
Given string s, return a list of s's tokens.
Adds space around special punctuation, then splits on whitespace.
"""
s = ' '+s
s = s.replace('->', '####DATE###RANGE##OP#') # XXX: Save '->'
s = re.sub('(?P<outside>[a-zA-Z0-9_,=:]+)\((?P<inside>[a-zA-Z0-9_,+-/]*)\)',
'#####\g<outside>####PAREN###\g<inside>##PAREN#', s) # XXX: Save U(1) and SL(2,Z)
s = re.sub('####PAREN###(?P<content0>[.0-9/-]*)(?P<plus>[+])(?P<content1>[.0-9/-]*)##PAREN#',
'####PAREN###\g<content0>##PLUS##\g<content1>##PAREN#', s)
s = re.sub('####PAREN###(?P<content0>([.0-9/]|##PLUS##)*)(?P<minus>[-])' +\
'(?P<content1>([.0-9/]|##PLUS##)*)##PAREN#',
'####PAREN###\g<content0>##MINUS##\g<content1>##PAREN#', s) # XXX: Save e(+)e(-)
for char in self.specials:
if char == '-':
s = s.replace(' -', ' - ')
s = s.replace(')-', ') - ')
s = s.replace('-(', ' - (')
else:
s = s.replace(char, ' '+char+' ')
s = re.sub('##PLUS##', '+', s)
s = re.sub('##MINUS##', '-', s) # XXX: Restore e(+)e(-)
s = re.sub('#####(?P<outside>[a-zA-Z0-9_,=:]+)####PAREN###(?P<inside>[a-zA-Z0-9_,+-/]*)##PAREN#',
'\g<outside>(\g<inside>)', s) # XXX: Restore U(1) and SL(2,Z)
s = s.replace('####DATE###RANGE##OP#', '->') # XXX: Restore '->'
return s.split()
querytokens = []
current_position = 0
re_quotes_match = re.compile(r'(?![\\])(".*?[^\\]")' + r"|(?![\\])('.*?[^\\]')")
for match in re_quotes_match.finditer(query):
match_start = match.start()
quoted_region = match.group(0).strip()
# clean the content after the previous quotes and before current quotes
unquoted = query[current_position : match_start]
querytokens.extend(get_tokens(unquoted))
# XXX: In case we end up with e.g. title:, "compton scattering", make it
# title:"compton scattering"
if querytokens and querytokens[0] and querytokens[-1][-1] == ':':
querytokens[-1] += quoted_region
# XXX: In case we end up with e.g. "expr1",->,"expr2", make it
# "expr1"->"expr2"
elif len(querytokens) >= 2 and querytokens[-1] == '->':
arrow = querytokens.pop()
querytokens[-1] += arrow + quoted_region
else:
# add our newly tokenized content to the token list
querytokens.extend([quoted_region])
# move current position to the end of the tokenized content
current_position = match.end()
# get tokens from the last appearance of quotes until the query end
unquoted = query[current_position : len(query)]
querytokens.extend(get_tokens(unquoted))
return querytokens
def parse(self, token_list, variable_substitution_dict=None):
"""Make token_list consumable by search_engine.
Turns a list of tokens and a variable mapping into a grouped list
of subexpressions in the format suitable for use by search_engine,
e.g.:
['+', 'searchterm', '-', 'searchterm to exclude', '|', 'another term']
Incidentally, this works recursively so parens can cause arbitrarily
deep nestings. But since the search_engine doesn't know about nested
structures, we need to flatten the input structure first.
"""
###
# Invariants:
# * Token list is never modified
# * Balanced parens remain balanced; unbalanced parens are an error
# * Individual tokens may only be exchanged for items in the variable
# substitution dict; otherwise they pass through unmolested
# * Return value is built up mostly as a stack
###
op_symbols = self.substitution_dict.values()
self.__tl_idx = 0
self.__tl_len = len(token_list)
def inner_parse(token_list, open_parens=False):
'''
although it's not in the API, it seems sensible to comment
this function a bit.
dist_token here is a token (e.g. a second-order operator)
which needs to be distributed across other tokens inside
the inner parens
'''
if open_parens:
parsed_values = []
else:
parsed_values = ['+']
i = 0
while i < len(token_list):
token = token_list[i]
if i > 0 and parsed_values[-1] not in op_symbols:
parsed_values.append('+')
if token == '(':
# if we need to distribute something over the tokens inside the parens
# we will know it because... it will end in a :
# that part of the list will be 'px', '+', '('
distributing = (len(parsed_values) > 2 and parsed_values[-2].endswith(':') and parsed_values[-1] == '+')
if distributing:
# we don't need the + if we are distributing
parsed_values = parsed_values[:-1]
offset = self.__tl_len - len(token_list)
inner_value = inner_parse(token_list[i+1:], True)
inner_value = ' '.join(inner_value)
if distributing:
if len(self.tokenize(inner_value)) == 1:
parsed_values[-1] = parsed_values[-1] + inner_value
elif "'" in inner_value:
parsed_values[-1] = parsed_values[-1] + '"' + inner_value + '"'
elif '"' in inner_value:
parsed_values[-1] = parsed_values[-1] + "'" + inner_value + "'"
else:
parsed_values[-1] = parsed_values[-1] + '"' + inner_value + '"'
else:
parsed_values.append(inner_value)
self.__tl_idx += 1
i = self.__tl_idx - offset
elif token == ')':
if parsed_values[-1] in op_symbols:
parsed_values = parsed_values[:-1]
if len(parsed_values) > 1 and parsed_values[0] == '+' and parsed_values[1] in op_symbols:
parsed_values = parsed_values[1:]
return parsed_values
elif token in op_symbols:
if len(parsed_values) > 0:
parsed_values[-1] = token
else:
parsed_values = [token]
else:
if variable_substitution_dict != None and token in variable_substitution_dict:
token = variable_substitution_dict[token]
parsed_values.append(token)
i += 1
self.__tl_idx += 1
# If we have an extra start symbol, remove the default one
if parsed_values[1] in op_symbols:
parsed_values = parsed_values[1:]
return parsed_values
return inner_parse(token_list, False)
class SpiresToInvenioSyntaxConverter:
"""Converts queries defined with SPIRES search syntax into queries
that use Invenio search syntax.
"""
# Constants defining fields
_DATE_ADDED_FIELD = 'datecreated:'
_DATE_UPDATED_FIELD = 'datemodified:'
_DATE_FIELD = 'year:'
_A_TAG = 'author:'
_EA_TAG = 'exactauthor:'
# Dictionary containing the matches between SPIRES keywords
# and their corresponding Invenio keywords or fields
# SPIRES keyword : Invenio keyword or field
_SPIRES_TO_INVENIO_KEYWORDS_MATCHINGS = {
# address
'address' : 'address:',
# affiliation
'affiliation' : 'affiliation:',
'affil' : 'affiliation:',
'aff' : 'affiliation:',
'af' : 'affiliation:',
'institution' : 'affiliation:',
'inst' : 'affiliation:',
# any field
'any' : 'anyfield:',
# author count
'ac' : 'authorcount:',
# bulletin
'bb' : 'reportnumber:',
'bbn' : 'reportnumber:',
'bull' : 'reportnumber:',
'bulletin-bd' : 'reportnumber:',
'bulletin-bd-no' : 'reportnumber:',
'eprint' : 'reportnumber:',
# citation / reference
'c' : 'reference:',
'citation' : 'reference:',
'cited' : 'reference:',
'jour-vol-page' : 'reference:',
'jvp' : 'reference:',
# collaboration
'collaboration' : 'collaboration:',
'collab-name' : 'collaboration:',
'cn' : 'collaboration:',
# conference number
'conf-number' : '111__g:',
'cnum' : '773__w:',
# country
'cc' : '044__a:',
'country' : '044__a:',
# date
'date': _DATE_FIELD,
'd': _DATE_FIELD,
# date added
'date-added': _DATE_ADDED_FIELD,
'dadd': _DATE_ADDED_FIELD,
'da': _DATE_ADDED_FIELD,
# date updated
'date-updated': _DATE_UPDATED_FIELD,
'dupd': _DATE_UPDATED_FIELD,
'du': _DATE_UPDATED_FIELD,
# first author
'fa' : 'firstauthor:',
'first-author' : 'firstauthor:',
# author
'a' : 'author:',
'au' : 'author:',
'author' : 'author:',
'name' : 'author:',
# exact author
# this is not a real keyword match. It is pseudo keyword that
# will be replaced later with author search
'ea' : 'exactauthor:',
'exact-author' : 'exactauthor:',
# experiment
'exp' : 'experiment:',
'experiment' : 'experiment:',
'expno' : 'experiment:',
'sd' : 'experiment:',
'se' : 'experiment:',
# journal
'journal' : 'journal:',
'j' : 'journal:',
'published_in' : 'journal:',
'spicite' : 'journal:',
'vol' : 'volume:',
# journal page
'journal-page' : '773__c:',
'jp' : '773__c:',
# journal year
'journal-year' : '773__y:',
'jy' : '773__y:',
# key
'key' : '970__a:',
'irn' : '970__a:',
'record' : '970__a:',
'document' : '970__a:',
'documents' : '970__a:',
# keywords
'k' : 'keyword:',
'keywords' : 'keyword:',
'kw' : 'keyword:',
# note
'note' : '500__a:',
# old title
'old-title' : '246__a:',
'old-t' : '246__a:',
'ex-ti' : '246__a:',
'et' : '246__a:',
#postal code
'postalcode' : 'postalcode:',
'zip' : 'postalcode:',
'cc' : 'postalcode:', # NB: duplicate dict key; this silently overrides the country-code 'cc' entry above
# ppf subject
'ppf-subject' : '650__a:',
'status' : '650__a:',
# recid
'recid' : 'recid:',
# report number
'r' : 'reportnumber:',
'rn' : 'reportnumber:',
'rept' : 'reportnumber:',
'report' : 'reportnumber:',
'report-num' : 'reportnumber:',
# title
't' : 'title:',
'ti' : 'title:',
'title' : 'title:',
'with-language' : 'title:',
# fulltext
'fulltext' : 'fulltext:',
'ft' : 'fulltext:',
# topic
'topic' : '695__a:',
'tp' : '695__a:',
'hep-topic' : '695__a:',
'desy-keyword' : '695__a:',
'dk' : '695__a:',
# doi
'doi': 'doi:',
# topcite
'topcit' : 'cited:',
'topcite' : 'cited:',
# captions
'caption' : 'caption:',
# category
'arx' : '037__c:',
'category' : '037__c:',
# primarch
'parx' : '037__c:',
'primarch' : '037__c:',
# texkey
'texkey' : '035__%:',
# type code
'tc' : 'collection:',
'ty' : 'collection:',
'type' : 'collection:',
'type-code' : 'collection:',
'scl': 'collection:',
'ps': 'collection:',
# field code
'f' : 'subject:',
'fc' : 'subject:',
'field' : 'subject:',
'field-code' : 'subject:',
'subject' : 'subject:',
# coden
'bc' : 'journal:',
'browse-only-indx' : 'journal:',
'coden' : 'journal:',
'journal-coden' : 'journal:',
# jobs specific codes
'job' : 'title:',
'position' : 'title:',
'region' : 'region:',
'continent' : 'region:',
'deadline' : '046__a:',
'rank' : 'rank:',
'cat' : 'cataloguer:',
# replace all the keywords without a match with an empty string
# this will remove the noise from the unknown keywords in the search
# and will search in all fields for the words following the keywords
# energy
'e' : '',
'energy' : '',
'energyrange-code' : '',
# exact experiment number
'ee' : '',
'exact-exp' : '',
'exact-expno' : '',
# hidden note
'hidden-note' : '',
'hn' : '',
# ppf
'ppf' : '',
'ppflist' : '',
# slac topics
'ppfa' : '',
'slac-topics' : '',
'special-topics' : '',
'stp' : '',
# test index
'test' : '',
'testindex' : '',
}
_SECOND_ORDER_KEYWORD_MATCHINGS = {
'rawref' : 'rawref:',
'refersto' : 'refersto:',
'refs': 'refersto:',
'citedby' : 'citedby:'
}
_INVENIO_KEYWORDS_FOR_SPIRES_PHRASE_SEARCHES = [
'affiliation:',
#'cited:', # topcite is technically a phrase index - this isn't necessary
'773__y:', # journal-year
'773__c:', # journal-page
'773__w:', # cnum
'044__a:', # country code
'subject:', # field code
'collection:', # type code
'035__z:', # texkey
# also exact expno, corp-auth, url, abstract, doi, mycite, citing
# but we have no invenio equivalents for these ATM
]
def __init__(self):
"""Initialize the state of the converter"""
self._months = {}
self._month_name_to_month_number = {}
self._init_months()
self._compile_regular_expressions()
def _compile_regular_expressions(self):
"""Compiles some of the regular expressions that are used in the class
for higher performance."""
# regular expression that matches the contents in single and double quotes,
# taking into account whether they are escaped.
self._re_quotes_match = re.compile(r'(?![\\])(".*?[^\\]")' + r"|(?![\\])('.*?[^\\]')")
# match cases where a keyword distributes across a conjunction
self._re_distribute_keywords = re.compile(r'''(?ix) # verbose, ignorecase on
\b(?P<keyword>\S*:) # a keyword is anything that's not whitespace with a colon
(?P<content>[^:]+?)\s* # content is the part that comes after the keyword; it should NOT
# have colons in it! that implies that we might be distributing
# a keyword OVER another keyword. see ticket #701
(?P<combination>\ and\ not\ |\ and\ |\ or\ |\ not\ )\s*
(?P<last_content>[^:]*?) # oh look, content without a keyword!
(?=\ and\ |\ or\ |\ not\ |$)''')
# massaging SPIRES quirks
self._re_pattern_IRN_search = re.compile(r'970__a:(?P<irn>\d+)')
self._re_topcite_match = re.compile(r'(?P<x>cited:\d+)\+')
# regular expression that matches author patterns
# and author patterns with second-order-ops on top
# does not match names with " or ' around them, since
# those should not be touched
self._re_author_match = re.compile(r'''(?ix) # verbose, ignorecase
\b((?P<secondorderop>[^\s]+:)?) # do we have a second-order-op on top?
((?P<first>first)?)author:(?P<name>
[^\'\"] # first character not a quotemark
[^()]*? # some stuff that isn't parentheses (that is dealt with in pp)
[^\'\"]) # last character not a quotemark
(?=\ and\ not\ |\ and\ |\ or\ |\ not\ |$)''')
# regular expression that matches exact author patterns
# the group defined in this regular expression is used in method
# _convert_spires_exact_author_search_to_invenio_author_search(...)
# in case of changes correct also the code in this method
self._re_exact_author_match = re.compile(r'\b((?P<secondorderop>[^\s]+:)?)exactauthor:(?P<author_name>[^\'\"].*?[^\'\"]\b)(?= and not | and | or | not |$)', re.IGNORECASE)
# match a second-order operator with no operator following it
self._re_second_order_op_no_index_match = re.compile(r'''(?ix) # ignorecase, verbose
(^|\b|:)(?P<second_order_op>(refersto|citedby):)
(?P<search_terms>[^\"\'][^:]+?) # anything without an index should be absorbed here
\s*
(?P<conjunction_or_next_keyword>(\ and\ |\ not\ |\ or\ |\ \w+:\w+|$))
''')
# match search term, its content (words that are searched) and
# the operator preceding the term.
self._re_search_term_pattern_match = re.compile(r'\b(?P<combine_operator>find|and|or|not)\s+(?P<search_term>\S+:)(?P<search_content>.+?)(?= and not | and | or | not |$)', re.IGNORECASE)
# match journal searches
self._re_search_term_is_journal = re.compile(r'''(?ix) # verbose, ignorecase
\b(?P<leading>(find|and|or|not)\s+journal:) # first combining operator and index
(?P<search_content>.+?) # what we are searching
(?=\ and\ not\ |\ and\ |\ or\ |\ not\ |$)''')
# regular expression matching date after pattern
self._re_date_after_match = re.compile(r'\b(?P<searchop>d|date|dupd|dadd|da|date-added|du|date-updated)\b\s*(after|>)\s*(?P<search_content>.+?)(?= and not | and | or | not |$)', re.IGNORECASE)
# regular expression matching date after pattern
self._re_date_before_match = re.compile(r'\b(?P<searchop>d|date|dupd|dadd|da|date-added|du|date-updated)\b\s*(before|<)\s*(?P<search_content>.+?)(?= and not | and | or | not |$)', re.IGNORECASE)
# match date searches which have been keyword-substituted
self._re_keysubbed_date_expr = re.compile(r'\b(?P<term>(' + self._DATE_ADDED_FIELD + ')|(' + self._DATE_UPDATED_FIELD + ')|(' + self._DATE_FIELD + '))(?P<content>.+?)(?= and not | and | or | not |$)', re.IGNORECASE)
# for finding (and changing) a variety of different SPIRES search keywords
self._re_spires_find_keyword = re.compile('^(f|fin|find)\s+', re.IGNORECASE)
# for finding boolean expressions
self._re_boolean_expression = re.compile(r' and | or | not | and not ')
# patterns for subbing out spaces within quotes temporarily
self._re_pattern_single_quotes = re.compile("'(.*?)'")
self._re_pattern_double_quotes = re.compile("\"(.*?)\"")
self._re_pattern_regexp_quotes = re.compile("\/(.*?)\/")
self._re_pattern_space = re.compile("__SPACE__")
self._re_pattern_equals = re.compile("__EQUALS__")
# for date math:
self._re_datemath = re.compile(r'(?P<datestamp>.+)\s+(?P<operator>[-+])\s+(?P<units>\d+)')
def is_applicable(self, query):
"""Is this converter applicable to this query?
Return true if query begins with find, fin, or f, or if it contains
a SPIRES-specific keyword (a, t, etc.), or if it contains the invenio
author: field search. """
if not CFG_WEBSEARCH_SPIRES_SYNTAX:
#SPIRES syntax is switched off
return False
query = query.lower()
if self._re_spires_find_keyword.match(query):
#leading 'find' is present and SPIRES syntax is switched on
return True
if CFG_WEBSEARCH_SPIRES_SYNTAX > 1:
query = self._re_pattern_double_quotes.sub('', query)
for word in query.split(' '):
if word in self._SPIRES_TO_INVENIO_KEYWORDS_MATCHINGS:
return True
return False
def convert_query(self, query):
"""Convert SPIRES syntax queries to Invenio syntax.
Do nothing to queries not in SPIRES syntax."""
# SPIRES syntax allows searches with 'find' or 'fin'.
if self.is_applicable(query):
query = re.sub(self._re_spires_find_keyword, 'find ', query)
if not query.startswith('find'):
query = 'find ' + query
# a holdover from SPIRES syntax is e.g. date = 2000 rather than just date 2000
query = self._remove_extraneous_equals_signs(query)
# these calls come before keyword replacement because once keywords
# are replaced, the date keyword becomes a specific field search
# and plain DATE is no longer matched in DATE BEFORE or DATE AFTER
query = self._convert_spires_date_before_to_invenio_span_query(query)
query = self._convert_spires_date_after_to_invenio_span_query(query)
# call to _replace_spires_keywords_with_invenio_keywords should be at the
# beginning because the next methods use the result of the replacement
query = self._standardize_already_invenio_keywords(query)
query = self._replace_spires_keywords_with_invenio_keywords(query)
query = self._normalise_journal_page_format(query)
query = self._distribute_keywords_across_combinations(query)
query = self._distribute_and_quote_second_order_ops(query)
query = self._convert_all_dates(query)
query = self._convert_irns_to_spires_irns(query)
query = self._convert_topcite_to_cited(query)
query = self._convert_spires_author_search_to_invenio_author_search(query)
query = self._convert_spires_exact_author_search_to_invenio_author_search(query)
query = self._convert_spires_truncation_to_invenio_truncation(query)
query = self._expand_search_patterns(query)
# remove FIND in the beginning of the query as it is not necessary in Invenio
query = query[4:]
query = query.strip()
return query
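# Editor's note -- an illustrative end-to-end conversion (not taken from the
# original test suite, so treat the exact output as approximate): a query
# such as "find a ellis and t quark" is expected to come out of
# convert_query() as something equivalent to "author:ellis and title:quark".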
def _init_months(self):
"""Defines a dictionary matching the name
of the month with its corresponding number"""
# this dictionary is used when generating match patterns for months
self._months = {'jan':'01', 'january':'01',
'feb':'02', 'february':'02',
'mar':'03', 'march':'03',
'apr':'04', 'april':'04',
'may':'05', # 'may' is both the abbreviation and the full name
'jun':'06', 'june':'06',
'jul':'07', 'july':'07',
'aug':'08', 'august':'08',
'sep':'09', 'september':'09',
'oct':'10', 'october':'10',
'nov':'11', 'november':'11',
'dec':'12', 'december':'12'}
# this dictionary is used to transform name of the month
# to a number used in the date format. By this reason it
# contains also the numbers itself to simplify the conversion
self._month_name_to_month_number = {'1':'01', '01':'01',
'2':'02', '02':'02',
'3':'03', '03':'03',
'4':'04', '04':'04',
'5':'05', '05':'05',
'6':'06', '06':'06',
'7':'07', '07':'07',
'8':'08', '08':'08',
'9':'09', '09':'09',
'10':'10',
'11':'11',
'12':'12',}
# combine it with months in order to cover all the cases
self._month_name_to_month_number.update(self._months)
def _get_month_names_match(self):
"""Retruns part of a patter that matches month in a date"""
months_match = ''
for month_name in self._months.keys():
months_match = months_match + month_name + '|'
months_match = r'\b(' + months_match[0:-1] + r')\b'
return months_match
def _convert_all_dates(self, query):
"""Tries to find dates in query and make them look like ISO-8601."""
def mangle_with_dateutils(query):
result = ''
position = 0
for match in self._re_keysubbed_date_expr.finditer(query):
result += query[position : match.start()]
datestamp = match.group('content')
daterange = self.convert_date(datestamp)
result += match.group('term') + daterange
position = match.end()
result += query[position : ]
return result
if GOT_DATEUTIL:
query = mangle_with_dateutils(query)
# else do nothing with the dates
return query
def convert_date(self, date_str):
def parse_relative_unit(date_str):
units = 0
datemath = self._re_datemath.match(date_str)
if datemath:
date_str = datemath.group('datestamp')
units = int(datemath.group('operator') + datemath.group('units'))
return date_str, units
def guess_best_year(d):
if d.year > datetime.today().year + 10:
return d - du_delta(years=100)
else:
return d
def parse_date_unit(date_str):
begin = date_str
end = None
# First split, relative time directive
# e.g. "2012-01-01 - 3" to ("2012-01-01", -3)
date_str, relative_units = parse_relative_unit(date_str)
try:
d = strptime(date_str, '%Y-%m-%d')
d += du_delta(days=relative_units)
return strftime('%Y-%m-%d', d), end
except ValueError:
pass
try:
d = strptime(date_str, '%y-%m-%d')
d += du_delta(days=relative_units)
d = guess_best_year(d)
return strftime('%Y-%m-%d', d), end
except ValueError:
pass
for date_fmt in ('%Y-%m', '%y-%m', '%m/%y', '%m/%Y'):
try:
d = strptime(date_str, date_fmt)
d += du_delta(months=relative_units)
return strftime('%Y-%m', d), end
except ValueError:
pass
try:
d = strptime(date_str, '%Y')
d += du_delta(years=relative_units)
return strftime('%Y', d), end
except ValueError:
pass
try:
d = strptime(date_str, '%y')
d += du_delta(days=relative_units)
d = guess_best_year(d)
return strftime('%Y', d), end
except ValueError:
pass
try:
d = strptime(date_str, '%b %y')
d = guess_best_year(d)
return strftime('%Y-%m', d), end
except ValueError:
pass
if 'this week' in date_str:
# Most recent Sunday up to today
# This week is iffy, not sure if we should
# start with sunday or monday
begin = datetime.today()
begin += du_delta(weekday=relativedelta.SU(-1))
end = datetime.today()
begin = strftime('%Y-%m-%d', begin)
end = strftime('%Y-%m-%d', end)
elif 'last week' in date_str:
# Previous week: from its Sunday through its Saturday
# Same Sunday-vs-Monday ambiguity as 'this week'
begin = datetime.today()
begin += du_delta(weekday=relativedelta.SU(-2))
end = begin + du_delta(weekday=relativedelta.SA(1))
begin = strftime('%Y-%m-%d', begin)
end = strftime('%Y-%m-%d', end)
elif 'this month' in date_str:
d = datetime.today()
begin = strftime('%Y-%m', d)
elif 'last month' in date_str:
d = datetime.today() - du_delta(months=1)
begin = strftime('%Y-%m', d)
elif 'yesterday' in date_str:
d = datetime.today() - du_delta(days=1)
begin = strftime('%Y-%m-%d', d)
elif 'today' in date_str:
start = datetime.today()
start += du_delta(days=relative_units)
begin = strftime('%Y-%m-%d', start)
elif date_str.strip() == '0':
begin = '0'
else:
default = datetime(datetime.today().year, 1, 1)
try:
d = du_parser.parse(date_str, default=default)
except (ValueError, TypeError):
begin = date_str
else:
begin = strftime('%Y-%m-%d', d)
return begin, end
if '->' in date_str:
begin_unit, end_unit = date_str.split('->', 1)
begin, dummy = parse_date_unit(begin_unit)
end, dummy = parse_date_unit(end_unit)
else:
begin, end = parse_date_unit(date_str)
if end:
daterange = '%s->%s' % (begin, end)
else:
daterange = begin
return daterange
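# Illustrative conversions (editor's note; exact outputs depend on today's
# date and on the strptime/strftime helpers imported earlier in this module):
#   convert_date('2012-06-15')   -> '2012-06-15'
#   convert_date('last month')   -> the previous calendar month as 'YYYY-MM'
#   convert_date('2011 -> 2012') -> a 'begin->end' span built from both halves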
def _convert_irns_to_spires_irns(self, query):
"""Prefix IRN numbers with SPIRES- so they match the INSPIRE format."""
def create_replacement_pattern(match):
"""method used for replacement with regular expression"""
return '970__a:SPIRES-' + match.group('irn')
query = self._re_pattern_IRN_search.sub(create_replacement_pattern, query)
return query
def _convert_topcite_to_cited(self, query):
"""Replace SPIRES topcite x+ with cited:x->999999999"""
def create_replacement_pattern(match):
"""method used for replacement with regular expression"""
return match.group('x') + '->999999999'
query = self._re_topcite_match.sub(create_replacement_pattern, query)
return query
def _convert_spires_date_after_to_invenio_span_query(self, query):
"""Converts date after SPIRES search term into invenio span query"""
def create_replacement_pattern(match):
"""method used for replacement with regular expression"""
return match.group('searchop') + ' ' + match.group('search_content') + '->9999'
query = self._re_date_after_match.sub(create_replacement_pattern, query)
return query
def _convert_spires_date_before_to_invenio_span_query(self, query):
"""Converts date before SPIRES search term into invenio span query"""
# method used for replacement with regular expression
def create_replacement_pattern(match):
return match.group('searchop') + ' ' + '0->' + match.group('search_content')
query = self._re_date_before_match.sub(create_replacement_pattern, query)
return query
def _expand_search_patterns(self, query):
"""Expands search queries.
If a search term is followed by several words e.g.
author:ellis or title:THESE THREE WORDS it is expanded to
author:ellis or (title:THESE and title:THREE...)
All keywords are thus expanded. XXX: this may lead to surprising
results for any later parsing stages if we're not careful.
"""
def create_replacements(term, content):
result = ''
content = content.strip()
# replace spaces within quotes by __SPACE__ temporarily:
content = self._re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", content)
content = self._re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", content)
content = self._re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", content)
if term in self._INVENIO_KEYWORDS_FOR_SPIRES_PHRASE_SEARCHES \
and not self._re_boolean_expression.search(content) and ' ' in content:
# the case of things which should be searched as phrases
result = term + '"' + content + '"'
else:
words = content.split()
if len(words) == 0:
# this should almost never happen; it requires the user to say e.g. 'find a junk:'
result = term
elif len(words) == 1:
# this is more common but still occasional
result = term + words[0]
else:
# general case
result = '(' + term + words[0]
for word in words[1:]:
result += ' and ' + term + word
result += ')'
# replace back __SPACE__ by spaces:
result = self._re_pattern_space.sub(" ", result)
return result.strip()
result = ''
current_position = 0
for match in self._re_search_term_pattern_match.finditer(query):
result += query[current_position : match.start()]
result += ' ' + match.group('combine_operator') + ' '
result += create_replacements(match.group('search_term'), match.group('search_content'))
current_position = match.end()
result += query[current_position : len(query)]
return result.strip()
def _remove_extraneous_equals_signs(self, query):
"""In SPIRES, both date = 2000 and date 2000 are acceptable. Get rid of the ="""
query = self._re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), '=', '__EQUALS__')+"'", query)
query = self._re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), '=', '__EQUALS__')+'\"', query)
query = self._re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), '=', '__EQUALS__')+"/", query)
query = query.replace('=', '')
query = self._re_pattern_equals.sub("=", query)
return query
def _convert_spires_truncation_to_invenio_truncation(self, query):
"""Replace SPIRES truncation symbol # with invenio trancation symbol *"""
return query.replace('#', '*')
def _convert_spires_exact_author_search_to_invenio_author_search(self, query):
"""Converts SPIRES search patterns for exact author into search pattern
for invenio"""
# method used for replacement with regular expression
def create_replacement_pattern(match):
# the regular expression where this group name is defined is in
# the method _compile_regular_expressions()
return self._EA_TAG + '"' + match.group('author_name') + '"'
query = self._re_exact_author_match.sub(create_replacement_pattern, query)
return query
def _convert_spires_author_search_to_invenio_author_search(self, query):
"""Converts SPIRES search patterns for authors to search patterns in invenio
that give similar results to the spires search.
"""
# result of the replacement
result = ''
current_position = 0
for match in self._re_author_match.finditer(query):
result += query[current_position : match.start() ]
if match.group('secondorderop'):
result += match.group('secondorderop')
scanned_name = NameScanner.scan_string_for_phrases(match.group('name'))
author_atoms = self._create_author_search_pattern_from_fuzzy_name_dict(scanned_name)
if match.group('first'):
author_atoms = author_atoms.replace('author:', 'firstauthor:')
if author_atoms.find(' ') == -1:
result += author_atoms + ' '
else:
result += '(' + author_atoms + ') '
current_position = match.end()
result += query[current_position : len(query)]
return result
def _create_author_search_pattern_from_fuzzy_name_dict(self, fuzzy_name):
"""Creates an invenio search pattern for an author from a fuzzy name dict"""
author_name = ''
author_middle_name = ''
author_surname = ''
full_search = ''
if len(fuzzy_name['nonlastnames']) > 0:
author_name = fuzzy_name['nonlastnames'][0]
if len(fuzzy_name['nonlastnames']) == 2:
author_middle_name = fuzzy_name['nonlastnames'][1]
if len(fuzzy_name['nonlastnames']) > 2:
author_middle_name = ' '.join(fuzzy_name['nonlastnames'][1:])
if fuzzy_name['raw']:
full_search = fuzzy_name['raw']
author_surname = ' '.join(fuzzy_name['lastnames'])
NAME_IS_INITIAL = (len(author_name) == 1)
NAME_IS_NOT_INITIAL = not NAME_IS_INITIAL
# we expect to have at least surname
if author_surname == '' or author_surname == None:
return ''
# ellis ---> "author:ellis"
#if author_name == '' or author_name == None:
if not author_name:
return self._A_TAG + author_surname
# ellis, j ---> "ellis, j*"
if NAME_IS_INITIAL and not author_middle_name:
return self._A_TAG + '"' + author_surname + ', ' + author_name + '*"'
# if there is middle name we expect to have also name and surname
# ellis, j. r. ---> ellis, j* r*
# j r ellis ---> ellis, j* r*
# ellis, john r. ---> ellis, j* r* or ellis, j. r. or ellis, jo. r.
# ellis, john r. ---> author:ellis, j* r* or exactauthor:ellis, j r or exactauthor:ellis jo r
if author_middle_name:
search_pattern = self._A_TAG + '"' + author_surname + ', ' + author_name + '*' + ' ' + author_middle_name.replace(" ","* ") + '*"'
if NAME_IS_NOT_INITIAL:
for i in range(1, len(author_name)):
search_pattern += ' or ' + self._EA_TAG + "\"%s, %s %s\"" % (author_surname, author_name[0:i], author_middle_name)
return search_pattern
# ellis, jacqueline ---> "ellis, jacqueline" or "ellis, j.*" or "ellis, j" or "ellis, ja.*" or "ellis, ja" or "ellis, jacqueline *, ellis, j *"
# in case we don't use SPIRES data, the ending dot is omitted.
search_pattern = self._A_TAG + '"' + author_surname + ', ' + author_name + '*"'
search_pattern += " or " + self._EA_TAG + "\"%s, %s *\"" % (author_surname, author_name[0])
if NAME_IS_NOT_INITIAL:
for i in range(1,len(author_name)):
search_pattern += ' or ' + self._EA_TAG + "\"%s, %s\"" % (author_surname, author_name[0:i])
search_pattern += ' or %s"%s, *"' % (self._A_TAG, full_search)
return search_pattern
def _normalise_journal_page_format(self, query):
"""Phys.Lett, 0903, 024 -> Phys.Lett,0903,024"""
def _is_triple(search):
return (len(re.findall('\s+', search)) + len(re.findall(':', search))) == 2
def _normalise_spaces_and_colons_to_commas_in_triple(search):
if not _is_triple(search):
return search
search = re.sub(',\s+', ',', search)
search = re.sub('\s+', ',', search)
search = re.sub(':', ',', search)
return search
result = ""
current_position = 0
for match in self._re_search_term_is_journal.finditer(query):
result += query[current_position : match.start()]
result += match.group('leading')
search = match.group('search_content')
search = _normalise_spaces_and_colons_to_commas_in_triple(search)
result += search
current_position = match.end()
result += query[current_position : ]
return result
def _standardize_already_invenio_keywords(self, query):
"""Replaces invenio keywords kw with "and kw" in order to
parse them correctly further down the line."""
unique_invenio_keywords = set(self._SPIRES_TO_INVENIO_KEYWORDS_MATCHINGS.values()) |\
set(self._SECOND_ORDER_KEYWORD_MATCHINGS.values())
unique_invenio_keywords.remove('') # for the ones that don't have invenio equivalents
for invenio_keyword in unique_invenio_keywords:
query = re.sub("(?<!... \+|... -| and |. or | not |....:)"+invenio_keyword, "and "+invenio_keyword, query)
query = re.sub("\+"+invenio_keyword, "and "+invenio_keyword, query)
query = re.sub("-"+invenio_keyword, "and not "+invenio_keyword, query)
return query
def _replace_spires_keywords_with_invenio_keywords(self, query):
"""Replaces SPIRES keywords that have directly
corresponding Invenio keywords
Replacements are done only in content that is not in quotes."""
# result of the replacement
result = ""
current_position = 0
for match in self._re_quotes_match.finditer(query):
# clean the content after the previous quotes and before current quotes
cleanable_content = query[current_position : match.start()]
cleanable_content = self._replace_all_spires_keywords_in_string(cleanable_content)
# get the content in the quotes (group one matches double
# quotes, group 2 singles)
if match.group(1):
quoted_content = match.group(1)
elif match.group(2):
quoted_content = match.group(2)
# append the processed content to the result
result = result + cleanable_content + quoted_content
# move current position at the end of the processed content
current_position = match.end()
# clean the content from the last appearance of quotes till the end of the query
cleanable_content = query[current_position : len(query)]
cleanable_content = self._replace_all_spires_keywords_in_string(cleanable_content)
result = result + cleanable_content
return result
def _replace_all_spires_keywords_in_string(self, query):
"""Replaces all SPIRES keywords in the string with their
corresponding Invenio keywords"""
for spires_keyword, invenio_keyword in iteritems(self._SPIRES_TO_INVENIO_KEYWORDS_MATCHINGS):
query = self._replace_keyword(query, spires_keyword, invenio_keyword)
for spires_keyword, invenio_keyword in iteritems(self._SECOND_ORDER_KEYWORD_MATCHINGS):
query = self._replace_second_order_keyword(query, spires_keyword, invenio_keyword)
return query
def _replace_keyword(self, query, old_keyword, new_keyword):
"""Replaces old keyword in the query with a new keyword"""
regex_string = r'(?P<operator>(^find|\band|\bor|\bnot|\brefersto|\bcitedby|^)\b[:\s\(]*)' + \
old_keyword + r'(?P<end>[\s\(]+|$)'
regular_expression = re.compile(regex_string, re.IGNORECASE)
result = regular_expression.sub(r'\g<operator>' + new_keyword + r'\g<end>', query)
result = re.sub(':\s+', ':', result)
return result
def _replace_second_order_keyword(self, query, old_keyword, new_keyword):
"""Replaces old second-order keyword in the query with a new keyword"""
regular_expression =\
re.compile(r'''(?ix) # verbose, ignorecase
(?P<operator>
(^find|\band|\bor|\bnot|\brefersto|\bcitedby|^)\b # operator preceding our operator
[:\s\(]* # trailing colon, spaces, parens, etc. for that operator
)
%s # the keyword we're searching for
(?P<endorop>
\s*[a-z]+:| # either an operator (like author:)
[\s\(]+| # or a paren opening
$ # or the end of the string
)''' % old_keyword)
result = regular_expression.sub(r'\g<operator>' + new_keyword + r'\g<endorop>', query)
result = re.sub(':\s+', ':', result)
return result
def _distribute_keywords_across_combinations(self, query):
"""author:ellis and james -> author:ellis and author:james"""
# method used for replacement with regular expression
def create_replacement_pattern(match):
return match.group('keyword') + match.group('content') + \
match.group('combination') + match.group('keyword') + \
match.group('last_content')
still_matches = True
while still_matches:
query = self._re_distribute_keywords.sub(create_replacement_pattern, query)
still_matches = self._re_distribute_keywords.search(query)
query = re.sub(r'\s+', ' ', query)
return query
def _distribute_and_quote_second_order_ops(self, query):
"""refersto:s parke -> refersto:\"s parke\""""
def create_replacement_pattern(match):
return match.group('second_order_op') + '"' +\
match.group('search_terms') + '"' +\
match.group('conjunction_or_next_keyword')
for match in self._re_second_order_op_no_index_match.finditer(query):
query = self._re_second_order_op_no_index_match.sub(create_replacement_pattern, query)
query = re.sub(r'\s+', ' ', query)
return query
| gpl-2.0 |
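# --- Editor's sketch (standalone addition; not part of the file above or the
# one below). It shows, in isolation, how the quote-matching regular
# expression used by tokenize() and _compile_regular_expressions() splits a
# query into quoted and unquoted regions. The function and variable names
# here are invented for the sketch.
import re

RE_QUOTES = re.compile(r'(?![\\])(".*?[^\\]")' + r"|(?![\\])('.*?[^\\]')")

def split_quoted_regions(query):
    """Return a list of (is_quoted, text) pairs covering the whole query."""
    pieces = []
    position = 0
    for match in RE_QUOTES.finditer(query):
        if match.start() > position:
            pieces.append((False, query[position:match.start()]))
        pieces.append((True, match.group(0)))
        position = match.end()
    if position < len(query):
        pieces.append((False, query[position:]))
    return pieces

print(split_quoted_regions('title:"compton scattering" and author:ellis'))
# -> [(False, 'title:'), (True, '"compton scattering"'), (False, ' and author:ellis')]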
r-mart/scikit-learn | examples/cluster/plot_cluster_comparison.py | 244 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
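# --- Editor's sketch (standalone addition; not part of the scikit-learn
# example above). The example compares the algorithms visually; as a
# supplement, a quantitative score can be computed for a single
# estimator/dataset pair. The parameter choices mirror the example but are
# otherwise arbitrary.
from sklearn import cluster, datasets, metrics
from sklearn.preprocessing import StandardScaler

X, _ = datasets.make_moons(n_samples=1500, noise=.05)
X = StandardScaler().fit_transform(X)
labels = cluster.MiniBatchKMeans(n_clusters=2).fit_predict(X)
print("silhouette score:", metrics.silhouette_score(X, labels))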
Intel-Corporation/tensorflow | tensorflow/python/keras/engine/training_arrays_v1.py | 14 | 28123 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to plain array data."""
# pylint: disable=protected-access
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.distribute import distributed_training_utils_v1
from tensorflow.python.keras.engine import training_utils_v1
from tensorflow.python.keras.utils.generic_utils import make_batches
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
def model_iteration(model,
inputs,
targets=None,
sample_weights=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
val_inputs=None,
val_targets=None,
val_sample_weights=None,
shuffle=True,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
mode=ModeKeys.TRAIN,
validation_in_fit=False,
prepared_feed_values_from_dataset=False,
steps_name='steps',
**kwargs):
"""Loop function for arrays of data with modes TRAIN/TEST/PREDICT.
Args:
model: Keras Model instance.
inputs: Either a list or dictionary of arrays, or a dataset instance.
targets: List/dictionary of input arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
interactively (e.g., in a production environment).
callbacks: List of callbacks to be called during training
val_inputs: Either a list or dictionary of arrays, or a dataset instance.
val_targets: List/dictionary of target arrays.
val_sample_weights: Optional list of sample weight arrays.
shuffle: Whether to shuffle the data at the beginning of each epoch.
initial_epoch: Epoch at which to start training (useful for resuming a
previous training run)
steps_per_epoch: Total number of steps (batches of samples) before
declaring one epoch finished and starting the next epoch. Ignored with
the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with the default value of
`None`.
validation_freq: Only relevant if validation data is provided. Integer or
`collections.abc.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
validation_in_fit: if true, then this method is invoked from within
training iteration (for validation). In the case where `val_inputs` is
a dataset, this flag indicates that its iterator and feed values are
already created so should properly reuse resources.
prepared_feed_values_from_dataset: if True, `inputs` is a list of feed
tensors returned from `_prepare_feed_values` call on the validation
dataset, so do not call it again on `inputs`. Should only be used for
inline validation (i.e., only if `validation_in_fit` is also True).
steps_name: The string name of the steps argument, either `steps`,
`validation_steps`, or `steps_per_epoch`. Only used for error message
formatting.
**kwargs: Additional arguments for backwards compatibility.
Returns:
- In TRAIN mode: `History` object.
- In TEST mode: Evaluation metrics.
- In PREDICT mode: Outputs of the Model called on inputs.
Raises:
ValueError: in case of invalid arguments.
"""
# Backwards compatibility.
if 'steps' in kwargs:
steps_per_epoch = kwargs.pop('steps')
if kwargs:
raise TypeError('Unknown arguments: %s' % (kwargs,))
# In case we were passed a dataset, we extract symbolic tensors from it.
reset_dataset_after_each_epoch = False
input_iterator = None
is_dataset = isinstance(inputs,
(dataset_ops.DatasetV1, dataset_ops.DatasetV2))
# TODO(fchollet): consider moving `steps_per_epoch` inference to
# _standardize_user_data and set reset_dataset_after_each_epoch as an
# attribute on the dataset instance.
if is_dataset:
if steps_per_epoch is None:
reset_dataset_after_each_epoch = True
steps_per_epoch = training_utils_v1.infer_steps_for_dataset(
model, inputs, steps_per_epoch, epochs=epochs, steps_name=steps_name)
input_iterator = _get_iterator(inputs, model._distribution_strategy)
# Enter tf.distribute.Strategy scope.
if model._distribution_strategy:
scope = distributed_training_utils_v1.distributed_scope(
strategy=model._distribution_strategy,
learning_phase=(1 if mode == ModeKeys.TRAIN else 0))
scope.__enter__()
use_steps = is_dataset or steps_per_epoch is not None
do_validation = val_inputs is not None
# Prepare input data.
inputs = input_iterator or inputs
if validation_in_fit and prepared_feed_values_from_dataset:
# When invoking validation in the training loop, avoid creating the iterator
# and the list of feed values for the same validation dataset multiple times
# (which would essentially call `iterator.get_next()`, slowing down execution
# and eventually leading to OOM errors).
ins = inputs
else:
ins = _prepare_feed_values(model, inputs, targets, sample_weights, mode)
# `ins` is a function when a distribute strategy is used in Eager mode. In
# that case `is_dataset` is True. The code branches that have requirements
# about the type of `ins` do not trigger in the distributed case.
if not is_dataset:
num_samples_or_steps = _get_num_samples_or_steps(ins, batch_size,
steps_per_epoch)
else:
num_samples_or_steps = steps_per_epoch
# Update sample_weight_mode of the model if sample_weights is specified by the
# user. We need to call this function after we have a handle on the inputs
# (both numpy arrays and datasets) in order to determine if the user has
# specified sample_weights.
_update_sample_weight_mode(model, mode, ins)
# Get step function and loop type. As part of building the execution
# function we recompile the metrics based on the updated
# sample_weight_mode value.
f = _make_execution_function(model, mode)
# Prepare validation data. Hold references to the iterator and the input list
# to properly reinitialize and reuse in multiple validation passes.
val_iterator = None
if isinstance(val_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
if validation_steps is None:
# Because we pass an iterator feed instead of a Dataset to the eval
# model_iteration() call, it will not trigger the dataset-input path
# that determines the number of steps required. To avoid this issue,
# set validation_steps here if validation_steps is None.
validation_steps = training_utils_v1.infer_steps_for_dataset(
model,
val_inputs,
validation_steps,
epochs=epochs,
steps_name='validation_steps')
val_iterator = _get_iterator(val_inputs, model._distribution_strategy)
val_inputs = _prepare_feed_values(
model, val_iterator, val_targets, val_sample_weights, ModeKeys.TEST)
# Get num steps for printing.
val_samples_or_steps = validation_steps
else:
# Get num samples for printing.
val_samples_or_steps = val_inputs and nest.flatten(
val_inputs)[0].shape[0] or None
if mode == ModeKeys.TRAIN and verbose:
_print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset)
# Configure callbacks.
count_mode = 'steps' if use_steps else 'samples'
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=num_samples_or_steps,
count_mode=count_mode,
verbose=verbose,
mode=mode)
# Find beforehand arrays that need sparse-to-dense conversion.
if issparse is not None and not use_steps:
indices_for_conversion_to_dense = []
feed = _get_model_feed(model, mode)
for i, (input_data, feed_tensor) in enumerate(zip(ins, feed)):
if issparse(input_data) and not backend.is_sparse(feed_tensor):
indices_for_conversion_to_dense.append(i)
# Select aggregation method.
if mode == ModeKeys.PREDICT:
aggregator = training_utils_v1.OutputsAggregator(
use_steps,
num_samples=None if steps_per_epoch else num_samples_or_steps,
steps=steps_per_epoch)
else:
aggregator = training_utils_v1.MetricsAggregator(
use_steps,
num_samples=None if steps_per_epoch else num_samples_or_steps,
steps=steps_per_epoch)
if model._compile_distribution:
distributed_training_utils_v1._copy_weights_to_distributed_model(
model, mode)
callbacks.model.stop_training = False
callbacks._call_begin_hook(mode)
initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)
for epoch in range(initial_epoch, epochs):
if callbacks.model.stop_training:
break
# Setup work for each epoch
epoch_logs = {}
if mode != ModeKeys.PREDICT:
# Collecting and resetting metrics has non-zero cost and will needlessly
# slow down model.predict.
model.reset_metrics()
if mode == ModeKeys.TRAIN:
callbacks.on_epoch_begin(epoch, epoch_logs)
if use_steps:
# Step-wise loop.
if steps_per_epoch is None:
# Loop over dataset until `OutOfRangeError` is raised.
target_steps = np.inf
else:
# Loop over dataset for the specified number of steps.
target_steps = steps_per_epoch
step = 0
while step < target_steps:
batch_logs = {'batch': step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', step, batch_logs)
# Get outputs.
try:
# `ins` can be callable in tf.distribute.Strategy + eager case.
if not callable(ins) or (model._distribution_strategy and
not distributed_training_utils_v1
.is_distributing_by_cloning(model)):
actual_inputs = ins
else:
actual_inputs = ins()
batch_outs = f(actual_inputs)
except errors.OutOfRangeError:
if is_dataset:
# The dataset passed by the user ran out of batches.
# Now we know the cardinality of the dataset.
# If steps_per_epoch was specified, then running out of data is
# unexpected, so we stop training and inform the user.
if steps_per_epoch:
callbacks.model.stop_training = True
logging.warning(
'Your dataset ran out of data; interrupting training. '
'Make sure that your dataset can generate at least '
'`%s * epochs` batches (in this case, %d batches). '
'You may need to use the repeat() function when '
'building your dataset.'
% (steps_name, steps_per_epoch * epochs))
elif step > 0:
steps_per_epoch = step
aggregator.steps = steps_per_epoch
else:
# We ran out of batches while the user passed an iterator (legacy).
callbacks.model.stop_training = True
logging.warning(
'Your dataset iterator ran out of data; '
'interrupting training. Make sure that your iterator '
'can generate at least `%s * epochs` '
'batches (in this case, %d batches). You may need to '
'use the repeat() function when building your '
'dataset.' % (steps_name, steps_per_epoch * epochs))
break
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if model._distribution_strategy:
batch_outs = (
distributed_training_utils_v1._per_replica_aggregate_batch(
model._distribution_strategy, batch_outs, model, mode))
# Aggregate results.
if step == 0:
aggregator.create(batch_outs)
aggregator.aggregate(batch_outs)
# Callbacks batch end.
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', step, batch_logs)
step += 1
if callbacks.model.stop_training:
break
else:
# Sample-wise loop.
index_array = np.arange(num_samples_or_steps)
if shuffle == 'batch':
index_array = training_utils_v1.batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(num_samples_or_steps, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
# Slice into a batch.
if len(batches) == 1:
# If we only have one batch, do not slice. This takes care of
# composite tensors in non-Dataset modes; we currently don't support
# slicing them.
# TODO(b/133517906): Add slicing support.
ins_batch = ins
else:
try:
if ins and isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
# Sparse to dense conversion.
if issparse is not None:
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
# Callbacks batch_begin.
batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs)
# Get outputs.
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
# Aggregate results.
if batch_index == 0:
aggregator.create(batch_outs)
aggregator.aggregate(batch_outs, batch_start, batch_end)
# Callbacks batch end.
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', batch_index, batch_logs)
if callbacks.model.stop_training:
break
aggregator.finalize()
results = aggregator.results
epoch_logs = cbks.make_logs(model, epoch_logs, results, mode)
if len(results) == 1:
results = results[0]
# Run the test loop every `validation_freq` epochs during training.
if (do_validation and
training_utils_v1.should_run_validation(validation_freq, epoch) and
not callbacks.model.stop_training):
if model._compile_distribution:
# Since we create a new clone from the original model we need to copy
# the weights back to the original model before we can run validation.
distributed_training_utils_v1._copy_weights_to_original_model(
model, ModeKeys.TRAIN)
val_results = model_iteration(
model,
val_inputs,
targets=val_targets,
sample_weights=val_sample_weights,
batch_size=batch_size,
steps_per_epoch=validation_steps,
callbacks=callbacks,
verbose=0,
mode=ModeKeys.TEST,
validation_in_fit=True,
prepared_feed_values_from_dataset=(val_iterator is not None),
steps_name='validation_steps')
if not isinstance(val_results, list):
val_results = [val_results]
epoch_logs = cbks.make_logs(
model, epoch_logs, val_results, mode, prefix='val_')
if val_iterator and epoch < epochs - 1:
_reinitialize_iterator(val_iterator, model._distribution_strategy)
if mode == ModeKeys.TRAIN:
# Epochs only apply to `fit`.
callbacks.on_epoch_end(epoch, epoch_logs)
# Reinitialize dataset iterator for the next epoch.
if reset_dataset_after_each_epoch and epoch < epochs - 1:
_reinitialize_iterator(input_iterator, model._distribution_strategy)
model._successful_loop_finish = True
callbacks._call_end_hook(mode)
if model._distribution_strategy:
if model._compile_distribution:
# TODO(priyag, psv): Copy back metrics to the original model as well?
distributed_training_utils_v1._copy_weights_to_original_model(model, mode)
scope.__exit__(None, None, None)
if mode == ModeKeys.TRAIN:
return model.history
return results
def _get_model_feed(model, mode):
if mode == ModeKeys.PREDICT:
feed = model._feed_inputs
else:
feed = (
model._feed_inputs + model._feed_targets + model._feed_sample_weights)
return feed
def _print_train_info(num_samples_or_steps, val_samples_or_steps, is_dataset):
increment = 'steps' if is_dataset else 'samples'
msg = 'Train on {0} {increment}'.format(
num_samples_or_steps, increment=increment)
if val_samples_or_steps:
msg += ', validate on {0} {increment}'.format(
val_samples_or_steps, increment=increment)
print(msg)
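# Example output of _print_train_info (illustrative values, not from the
# original source): for array inputs it prints e.g.
# "Train on 1000 samples, validate on 200 samples"; when the input is a
# dataset it counts in steps, e.g. "Train on 500 steps".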
def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch):
"""Returns total number of samples (when training in batch mode) or steps."""
if steps_per_epoch:
return steps_per_epoch
return training_utils_v1.check_num_samples(ins, batch_size, steps_per_epoch,
'steps_per_epoch')
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Args:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
if model._distribution_strategy:
if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
inputs = distributed_training_utils_v1.get_iterator(
inputs, model._distribution_strategy)
def get_distributed_inputs():
return distributed_training_utils_v1._prepare_feed_values(
model, inputs, targets, sample_weights, mode)
# In the eager case, we want to call the input method per step, so return
# a lambda from here that can be called. Note that this is applicable only
# in Distribution Strategy case as it follows the same code path for both
# eager and graph modes.
# TODO(priyag,omalleyt): Either we should move the training DS with
# IteratorBase to use training_generator code path, or figure out how to
# set a symbolic Iterator out of a Dataset when in eager mode.
if context.executing_eagerly():
return get_distributed_inputs
else:
return get_distributed_inputs()
if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
iterator_ops.Iterator)):
inputs, targets, sample_weights = model._standardize_user_data(
inputs,
extract_tensors_from_dataset=True)
inputs = training_utils_v1.ModelInputs(inputs).as_list()
targets = list(targets or [])
sample_weights = list(sample_weights or [])
ins = inputs + targets + sample_weights
if mode == ModeKeys.TRAIN and not isinstance(
backend.symbolic_learning_phase(), int):
ins += [True] # Add learning phase value.
return ins
def _get_iterator(inputs, distribution_strategy=None):
if distribution_strategy:
return distributed_training_utils_v1.get_iterator(
inputs, distribution_strategy)
return training_utils_v1.get_iterator(inputs)
def _reinitialize_iterator(iterator, distribution_strategy=None):
if distribution_strategy:
distributed_training_utils_v1.initialize_iterator(
iterator, distribution_strategy)
else:
training_utils_v1.initialize_iterator(iterator)
def _make_execution_function(model, mode):
"""Makes function to run one step of model execution."""
if model._distribution_strategy:
return distributed_training_utils_v1._make_execution_function(model, mode)
return model._make_execution_function(mode)
def _update_sample_weight_mode(model, mode, inputs):
"""Updates the sample_weight_mode of a given model."""
# Add a quick return to prevent us from calling model._feed_targets that
# accesses certain model properties that may not be set in the `PREDICT` mode.
if mode == ModeKeys.PREDICT:
return
sample_weights = None
# `inputs` is the model's inputs + targets + sample_weights +
# learning phase placeholder if specified. To update the sample_weight_mode
# we need to determine if the user has passed sample weights as part of the
# input.
if not callable(inputs):
sample_weights = inputs[len(model._feed_inputs) + len(model._feed_targets):]
has_learning_phase_pl = (mode == ModeKeys.TRAIN and
not isinstance(backend.symbolic_learning_phase(),
int))
if has_learning_phase_pl:
sample_weights = sample_weights[:-1]
model._update_sample_weight_modes(sample_weights=sample_weights)
# Call the DistributionStrategy specific function to update the
# sample_weight_mode on the model.
if model._distribution_strategy:
distributed_training_utils_v1._update_sample_weight_modes(model, mode,
sample_weights)
# For backwards compatibility for internal users of these loops.
fit_loop = functools.partial(model_iteration, mode=ModeKeys.TRAIN)
test_loop = functools.partial(
model_iteration, mode=ModeKeys.TEST, shuffle=False)
predict_loop = functools.partial(
model_iteration, mode=ModeKeys.PREDICT, shuffle=False)
class ArrayLikeTrainingLoop(training_utils_v1.TrainingLoop):
"""TrainingLoop that handle inputs like array.
This is the default handler for most of the input data types, includes
symbolic tensors or Numpy array-like, Datasets and iterators in graph mode
(since they generate symbolic tensors). This Function is used to handle model
with `run_eagerly` = False.
"""
def fit(self,
model,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size,
steps_per_epoch, x)
x, y, sample_weights = model._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split,
shuffle=shuffle)
if validation_data:
val_x, val_y, val_sample_weights = model._prepare_validation_data(
validation_data, batch_size, validation_steps)
elif validation_split and 0. < validation_split < 1.:
(x, y, sample_weights, val_x, val_y, val_sample_weights
) = training_utils_v1.split_training_and_validation_data(
x, y, sample_weights, validation_split)
else:
if validation_steps:
raise ValueError('`validation_steps` should not be specified if '
'`validation_data` is None.')
val_x, val_y, val_sample_weights = None, None, None
return fit_loop(
model,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
steps_name='steps_per_epoch')
def evaluate(self,
model,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
x, y, sample_weights = model._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps',
steps=steps)
return test_loop(
model,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
def predict(self,
model,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
**kwargs):
batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
x, _, _ = model._standardize_user_data(
x, check_steps=True, steps_name='steps', steps=steps)
return predict_loop(
model,
x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks)
| apache-2.0 |
JeanKossaifi/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 247 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
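# Optional sanity check (not part of the original example): compare each
# estimator's recovered coefficients against the ground-truth weights `w`.
# Both fitted estimators expose a `coef_` attribute.
print("Bayesian Ridge MAE vs. true weights: %.4f"
      % np.mean(np.abs(clf.coef_ - w)))
print("OLS MAE vs. true weights:            %.4f"
      % np.mean(np.abs(ols.coef_ - w)))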
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
pierrelapointe/scancode-toolkit | src/textcode/strings.py | 3 | 9395 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
import string
import re
from commoncode.text import toascii
"""
Extract ASCII strings from a (possibly) binary string.
Use character translations tables for this, replacing all non-printable
characters by a newline char.
Then split on lines, yield these lines filtering out junk strings.
This is similar to what GNU Binutils strings does.
TODO: Add support for some additional and common strings-in-binary encodings
such as UTF-16 in Windows binary executables.
"""
"""
Definition of non-printable text: remove digits, letters, punctuation and white
space from the set of all characters; all the rest is junk.
Note: \r and \f are also replaced with a newline. Since \r is replaced by a
newline, some empty lines are possible, which is not a problem.
The fact that strings could be null-terminated is handled since the null is
also replaced by a newline.
The translation table is:
0123456789abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \n
"""
allchars = string.maketrans('', '')
non_printable = string.translate(allchars, allchars,
string.digits
+ string.letters
+ string.punctuation
+ ' \t\n')
"""
This creates a translation table to replace junk chars with a newline char
"""
printable = string.maketrans(non_printable, '\n' * len(non_printable))
# this is the heart of the code... a one-liner
def strings(binary_string):
"""
Return a list of strings extracted from a binary string.
"""
return lines(remove_non_printable(binary_string))
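# Illustration (comment added for clarity, not in the original module): given a
# byte string such as 'GCC: (GNU) 4.8\x00\x01\x02hello world\x00',
# remove_non_printable() maps every junk byte to '\n', so strings() yields
# 'GCC: (GNU) 4.8', a couple of empty lines, then 'hello world'. Empty and
# junk-looking lines are discarded later by the filters used in
# strings_in_file().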
def lines(s):
for x in s.splitlines():
yield x
def remove_non_printable(binary_string):
"""
Returns an ASCII printable string for a binary string, removing all non
printable characters.
"""
return binary_string.translate(printable)
def file_strings(location, buff_size=1024 * 1024):
"""
Process (eventually large) files in chunks and yield ASCII strings found
in the file at location, encoded as Unicode.
"""
file_size = os.path.getsize(location)
count = 0
with open(location, 'rb') as f:
while True:
start = f.tell()
buf = f.read(buff_size)
count += 1
if not buf or not buf.strip():
break
s = remove_non_printable(buf)
if file_size >= buff_size * count:
# if the last char is not a '\n' we need to backtrack to avoid
# truncating lines in the middle
last_ln_end = s.rfind('\n')
# If no newline was found we would ideally read forward instead of
# backtracking; since this is a very unlikely event, we just
# yield what we have.
if last_ln_end != -1:
to_truncate = len(s) - last_ln_end
back_pos = start - to_truncate
f.seek(back_pos, os.SEEK_CUR)
s = s[:last_ln_end + 1]
for l in s.splitlines():
ls = l.strip()
if ls:
yield ls
#
# Junk strings filtering
#
# Filters accept or reject a string: a filtering function returns True if the
# string needs to be filtered out
non_chars = string.translate(allchars, allchars, string.letters)
letters = string.maketrans(non_chars, ' ' * len(non_chars))
def filter_string(S, min_length=2):
"""
Filter out strings composed of:
* only one repeated character, or
* only short tokens.
"""
tok = del_special(S)
tok = tok.translate(letters)
tok = tok.strip().lower().split()
if S:
repeat = S[0]
else:
repeat = ' '
return (all(len(x) <= min_length for x in tok)
or all(x == repeat for x in list(S)))
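# Illustration (comment added for clarity): with the default min_length=2,
# filter_string('aaaaaa') and filter_string('ab cd') both return True (a single
# repeated character / only short tokens), while filter_string('hello world')
# returns False and is therefore kept by is_good() below.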
def filter_strict(S):
return filter_string (S, min_length=4)
# Ensure certain short strings are not filtered out
token_to_keep = set(
(
# elf related
'gnu',
'gnu as',
'elf',
'rel',
'dyn',
'jcr',
'dot',
'jss',
'plt',
'bss',
'jcr',
'end',
'die',
# license related
'gpl',
'mit',
'bsd',
'(c)',
)
)
def to_keep(S):
"""
Keeps strings that are in a reference set.
"""
return S.lower() in token_to_keep
def is_good(S, filt=filter_string):
"""
Returns True if the string is a keeper or False if it should be filtered out.
"""
if not S:
return False
return to_keep(S) or not filt(S)
#
# transformers change the content of strings
#
# TODO: add c++ demangling, etc)
def del_special(S):
"""
Replace verbatim white space line endings and tabs (\\n, \\r, \\t) that
may exist as-is as literals in the extracted string by a space.
"""
return S.replace('\\r', ' ').replace('\\n', ' ').replace('\\t', ' ')
def is_mangled_ccp(S): # @UnusedVariable
return False
def demangle_cpp(S):
return S
def is_mangled_java(S): # @UnusedVariable
return False
def demangle_java(S):
return S
def strings_in_file(location, filt=filter_string):
"""
Yield ASCII strings, encoded as Unicode, extracted from a file at location.
"""
for s in file_strings(location):
if is_good(s, filt):
s = s.strip()
if s:
yield toascii(s)
# http://code.activestate.com/recipes/466293-efficient-character-escapes-decoding/?in=user-2382677
# classifiers detect specific patterns in the strings
# TODO: add path detection, etc
# Detect paths and file names
def FILE_RE():
return re.compile('^[\w_\-]+\.\w{1,4}$', re.IGNORECASE)
def is_file(S):
"""
Return True if S looks like a file name.
Example: dsdsd.dll
"""
return 'file-name' if re.match(FILE_RE(), S) else None
def SO_RE():
return re.compile('^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE)
def is_shared_object(S):
"""
Return True if S looks like a shared object file.
Example: librt.so.1
"""
return 'elf-shared-object' if re.match(SO_RE(), S) else None
def POSIXPATH_RE():
return re.compile('^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE)
def is_posix_path(S):
"""
Return True if S looks like a posix path.
Example: /usr/lib/librt.so.1 or usr/lib
"""
return re.match(POSIXPATH_RE(), S)
def RELATIVE_RE():
return re.compile('^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE)
def is_relative_path(S):
"""
Return True if S looks like a relative posix path.
Example: usr/lib/librt.so.1 or ../usr/lib
"""
return re.match(POSIXPATH_RE(), S)
def WINPATH_RE():
return re.compile('^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE)
def is_win_path(S):
"""
Return True if S looks like a win path.
Example: c:\usr\lib\librt.so.1.
"""
return re.match(WINPATH_RE(), S)
def is_c_source(S):
"""
Return True if S looks like a C source path.
Example: this.c
FIXME: should get actual algo from contenttype.
"""
return S.endswith(('.c', '.cpp', '.hpp', '.h',))
def is_java_source(S):
"""
Return True if S looks like a Java source path.
Example: this.java
FIXME: should get actual algo from contenttype.
"""
return S.endswith(('.java', '.jsp', '.aj',))
def is_GLIBC_ref(S):
"""
Return True if S looks like a reference to GLIBC, as typically found in
ELF binaries.
"""
return '@@GLIBC' in S
def JAVAREF_RE():
return re.compile('^.*$', re.IGNORECASE)
def is_java_ref(S): # @UnusedVariable
"""
Return True if S looks like a reference to a java class or package in a
class file.
"""
return False
class BinaryStringsClassifier(object):
"""
Classify extracted strings as good or bad/junk.
The types of strings that are recognized include:
file
file_path
junk
text
"""
# TODO: Implement me
| apache-2.0 |
ElettraSciComp/STP-Core | STP-Core/postprocess/polarfilter.py | 1 | 3340 | ###########################################################################
# (C) 2016 Elettra - Sincrotrone Trieste S.C.p.A.. All rights reserved. #
# #
# #
# This file is part of STP-Core, the Python core of SYRMEP Tomo Project, #
# a software tool for the reconstruction of experimental CT datasets. #
# #
# STP-Core is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# STP-Core is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License #
# for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with STP-Core. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
#
# Author: Francesco Brun
# Last modified: April, 6th 2017
#
from numpy import float32, amin, amax, sqrt, max, pad
import imp, inspect, os
import polarfilters
import cv2
def polarfilter(im, polarfilt_opt):
"""Post-process a reconstructed image with a filter in polar coordinates.
Parameters
----------
im : array_like
Image data as numpy array.
polarfilt_opt : string
String containing filter method and the related parameters.
"""
# Get method and args:
method, args = polarfilt_opt.split(":", 1)
# The "none" filter means no filtering:
if (method != "none"):
# Dynamically load the module:
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
str = os.path.join(path, "polarfilters", method + '.py')
m = imp.load_source(method, str)
# Split filter method and arguments (duplicates the split above):
filt_method, filt_args = polarfilt_opt.split(":", 1)
# Get original size:
origsize = im.shape
# Up-scaling:
im = cv2.resize(im, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
rows, cols = im.shape
cen_x = im.shape[1] / 2
cen_y = im.shape[0] / 2
# To polar:
im = cv2.linearPolar(im, (cen_x, cen_y), amax([rows,cols]), cv2.INTER_CUBIC)
# Padding:
cropsize = im.shape
im = pad(im, ((origsize[0] / 4, origsize[0] / 4), (origsize[1] / 2, 0)), 'symmetric')
# Call the filter dynamically:
im = getattr(m, method)(im, args)
# Crop:
im = im[origsize[0] / 4:origsize[0] / 4 + cropsize[0],origsize[1] / 2:origsize[1] / 2 + cropsize[1]]
# Back to cartesian:
im = cv2.linearPolar(im, (cen_x, cen_y), amax([rows,cols]), cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)
# Down-scaling to original size:
im = cv2.resize(im, (origsize[0], origsize[1]), interpolation = cv2.INTER_CUBIC)
return im
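# Usage sketch (illustrative; the filter name below is hypothetical and must
# correspond to a module available under polarfilters/):
#
#   filtered = polarfilter(rec_image, "ringremoval:arg1;arg2")
#
# A polarfilt_opt whose method part is "none" (e.g. "none:") returns the
# image unchanged.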
| gpl-3.0 |
thanhan/subjective-crowd-hcomp16 | learner.py | 1 | 38536 | import analysis
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn import cross_validation
from sklearn.pipeline import Pipeline
import sklearn
from sklearn.kernel_ridge import KernelRidge  # used by learn() below
from sklearn import svm
from sklearn import gaussian_process
from scipy.special import digamma
import scipy.stats
from sklearn.metrics import roc_auc_score
import copy
from scipy.stats.stats import pearsonr
import matplotlib.pyplot as plt
list_freq = [422400, 729600, 1036800, 1497600, 1958400, 2457600]
def preprocess(dic, app = 'angry_birds'):
"""
given dic: conf -> labels
return the design matrix, each row = (features, target)
"""
a = np.zeros((24, 3))
i = 0
for cores in [1,2,3,4]:
for freq in list_freq:
m = np.mean(dic[app, cores, freq, 0])
a[i,:] = np.asarray([cores, freq, m])
i+= 1
return a
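# Illustration (comment added for clarity): preprocess() returns a 24 x 3
# design matrix for the given app, one row per (cores, freq) configuration,
# with columns [cores, cpu_freq, mean rating], e.g. [2, 1497600, 3.4]
# (the 3.4 is a made-up mean rating).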
def learn(a):
data = a[:,0:-1]
target = a[:,-1]
LR = linear_model.LinearRegression()
scores = cross_validation.cross_val_score(LR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'LR:', -scores.mean()
LR_poly2 = Pipeline([('poly', PolynomialFeatures(degree=3)),
('linear', linear_model.LinearRegression())])
scores = cross_validation.cross_val_score(LR_poly2, data, target, cv=4, scoring = 'mean_absolute_error')
print 'LR_poly3:', -scores.mean()
Ridge = linear_model.Ridge (alpha = 0)
scores = cross_validation.cross_val_score(Ridge, data, target, cv=4, scoring = 'mean_absolute_error')
print 'Ridge:', -scores.mean()
KR = KernelRidge()
scores = cross_validation.cross_val_score(KR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'KR:', -scores.mean()
SVR = svm.SVR()
scores = cross_validation.cross_val_score(SVR, data, target, cv=4, scoring = 'mean_absolute_error')
print 'SVR:', -scores.mean()
GP = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
scores = cross_validation.cross_val_score(GP, data, target, cv=4, scoring = 'mean_absolute_error')
print 'GP:', -scores.mean()
app = [ 'angry_birds', 'youtube', 'gladiator', 'chrome_cnn',
'epic_citadel', 'facebook', 'photoshop', 'compubench_rs_particles',
'compubench_rs_gaussian', 'compubench_rs_julia', 'compubench_rs_facedetection', 'compubench_rs_ambiant']
gpu = [200, 320, 389, 462.4, 578]
def create_features(conf):
f = [0]*12 + [0,0,0,1]
for i, a in enumerate(app):
if a == conf[0]:
f[i] = 1
f[12] = conf[1] * 1.0 / 4
f[13] = conf[2] * 1.0 / 2457600
f[14] = gpu[conf[3]] *1.0 / 578
f[15] = 1.0
return f
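# Illustration (comment added for clarity): create_features maps a
# configuration tuple (application, cores, freq, gpu_index) to a
# 16-dimensional vector: 12 one-hot application indicators, cores/4,
# freq/2457600, gpu_clock/578 and a constant bias of 1.0. For example
# ('youtube', 2, 1228800, 4) -> [0, 1, 0, ..., 0, 0.5, 0.5, 1.0, 1.0].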
class model:
def __init__(self, data, lambda_w = 0, lambda_v = 0, A = 0, B = 0):
"""
data is an array of [wid, question, url, rating, time, ip, application, cpu cores, cpu freq, gpu index]
e.g:
['A2XXXXXXXXXXX', 'Question Random Assignment - Text 18',
'www.youtube.com/embed/ZqD83lS8exs?wmode=transparent',
'1 - Very Dissatisfied', 'Thu Jul 31 02:56:58 GMT 2014',
'11.11.1.111', 'compubench_rs_particles', '3', '2457600', '0']
lambda: weight for regularization of W and V
A, B: weights (prior) on theta
organize:
- F: a features matrix, row i = list of features for item i
- L: crowd label, elem i = (list of W, list of L)
"""
self.data = data
self.dic_conf_wl = analysis.get_dic_conf_wl(data)
n = len(self.dic_conf_wl)
self.list_conf = self.dic_conf_wl.keys()
self.F = []
self.empi_mean = []
self.empi_var = []
for conf in self.list_conf:
f = create_features(conf)
self.F.append(f)
labels = self.dic_conf_wl[conf][1]
self.empi_mean.append( np.mean(labels) )
self.empi_var.append ( np.var(labels) )
self.F = np.asarray(self.F)
self.L = []
for conf in self.list_conf:
labels = self.dic_conf_wl[conf]
self.L.append(labels)
self.n = len(self.L) # number of items
self.m = len(self.F[0]) # number of features
# build dic_w_il
self.dic_w_il = {}
for i in range(self.n):
workers, labels = self.L[i]
for w, l in zip(workers, labels):
if w not in self.dic_w_il: self.dic_w_il[w] = []
self.dic_w_il[w].append( (i,l))
self.ep = 1e-100
self.lambda_w = lambda_w
self.lambda_v = lambda_v
self.A = A
self.B = B
def get_mean(self, i):
return self.F[i].dot(self.w)
def get_std(self, i):
return np.exp( self.F[i].dot(self.v) )
def get_var(self, i):
return pow( self.get_std(i), 2)
def spam_dist(self, l):
"""
distribution of labels from spammers
"""
return scipy.stats.norm(3,self.s).pdf(l)
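# Model sketch (comment added for clarity; mirrors the E/M steps below): a
# label l for item i from worker j is assumed to come either from the
# "diligent" component N(F[i].w, exp(F[i].v)^2) with probability theta[j],
# or from the spammer component N(3, s^2) with probability 1 - theta[j].
# The E-step computes the posterior responsibility pt that each label was
# produced by the diligent component.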
def e_step(self):
"""
evaluate posterior over Z
"""
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
p1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i), scale = self.get_std(i) ) * self.theta[w]
p0 = self.spam_dist(l) * (1-self.theta[w])
p = p1 *1.0/ (p0 + p1)
self.pt[i].append(p)
def expected_ll(self, w, v):
"""
return expected log likelihood
"""
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
#theta = self.theta[worker]
ll0 = np.log( self.spam_dist(l) ) # + np.log(1-theta)
mean = self.F[i].dot(w)
std = np.exp(self.F[i].dot(v))
if std < self.ep: std = self.ep
ll1 = scipy.stats.norm.logpdf(l, loc = mean, scale = std )# + np.log(theta)
res += pt0*ll0 + pt1*ll1
#regularization
for i in range(self.m-1):
res -= self.lambda_w * w[i]*w[i] + self.lambda_v * v[i]*v[i]
return res
def grad_expected_ll(self, w, v):
gw = np.zeros( (self.m,) )
gv = np.zeros( (self.m,) )
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w)
sigma = np.exp(self.F[i].dot(v))
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
update_v = pt1*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
gv += update_v
for i in range(self.m-1):
gw[i] -= 2 * self.lambda_w * w[i]
gv[i] -= 2 * self.lambda_v * v[i]
return np.hstack( (gw, gv) )
def check_grad(self, ep = 0.0000001, check_range = None):
if check_range==None: check_range = range(self.m)
w = np.random.rand(self.m) - 0.5
v = np.random.rand(self.m) - 0.5
a = self.expected_ll(w, v)
fw = np.zeros(self.m)
fv = np.zeros(self.m)
for i in check_range:
x = np.zeros(self.m)
x[i] = ep
fw[i] = (self.expected_ll(w + x, v) - a ) / ep
fv[i] = (self.expected_ll(w, v + x) - a ) / ep
print w
print v
print 'calculated grad = ', zip(range(self.m*2), self.grad_expected_ll(w, v))
print 'finite diff grad = ', zip(range(self.m*2), np.hstack((fw, fv)))
def m_step_theta(self):
"""
set theta_j to max expected ll
"""
for w in self.dic_w_il: self.theta[w] = 0
for i in range(self.n):
workers, labels = self.L[i]
for w, l, pt1 in zip(workers, labels, self.pt[i]):
self.theta[w] += pt1
for w in self.dic_w_il:
num = len(self.dic_w_il[w])
r = self.theta[w] * 1.0 / num
self.theta[w] = r
#if (r < 0.85 ):
# self.theta[w] = r # no regularize
#else:
# # regularize
# self.theta[w] = (self.theta[w] * 1.0 + self.A) / ( len( self.dic_w_il[w]) + self.A + self.B)
# set self.s = sd of spam dist
s = 0
sw = 0
for i in range(self.n):
workers, labels = self.L[i]
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pow(l - 3, 2)*(1-self.theta[w])
sw += 1- self.theta[w]
if sw > 0:
self.s = pow(s*1.0/sw, 0.5)
def m_step_wv(self, update_v = True):
"""
maximize w and v
"""
m = self.m
f = lambda x: -self.expected_ll(x[:m], x[m:])
fp = lambda x: -self.grad_expected_ll(x[:m], x[m:])
x0 = np.hstack( (self.w, self.v) )
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
#print res
self.w = res.x[:m]
if update_v:
self.v = res.x[m:]
def m_step(self, update_v = True):
"""
maximize expected ll of w, v, theta
"""
self.m_step_theta()
self.m_step_wv(update_v)
def init_wv(self):
"""
init the params w and v
using the results of linear regression on the empirical means and variances
"""
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.empi_mean)
self.w = self.lr.coef_
self.lr.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)) )
self.v = self.lr.coef_
def init_em(self, h_theta = 0.8):
"""
init w, v, theta
"""
self.s = 2 # sd of spam distribution
self.w = np.zeros((self.m,))
self.v = np.zeros((self.m,))
self.theta = {}
dic_ul = analysis.get_dic_url_labels(self.data)
dic_w, dic_mean = analysis.agreement(self.data, dic_ul)
for w in self.dic_w_il:
#self.theta[w] = 1 - h_theta*abs(dic_mean[w])
self.theta[w] = h_theta
self.init_wv()
#self.pt = []
#for i in range(self.n):
# self.pt.append([])
# workers, labels = self.L[i]
# for w, l in zip(workers, labels):
# self.pt[i].append(0.99)
def em(self, w_it = 3, v_it = 1):
"""
"""
# iterate
for it in range(w_it):
self.e_step()
update_v = it < v_it
self.m_step(update_v)
def get_var_f(self, f):
"""
return variance for a feature vector
"""
return pow ( np.exp ( f.dot(self.v) ), 2)
def predict(self, list_conf):
"""
predict mean and var of new conf
"""
res_mean = []
res_var = []
for conf in list_conf:
f = np.asarray( create_features(conf) )
mean = f.dot(self.w)
var = self.get_var_f(f)
res_mean.append(mean)
res_var.append(var)
return (res_mean, res_var)
def spam_score(self, workers):
"""
return prob of the worker being a spammer
"""
res = []
for w in workers:
res.append(1 - self.theta[w])
return res
def get_dic(self, w, v):
"""
return dics of conf to mean and var
using prediction by w and v
"""
dic_mean = {}
dic_var = {}
for i in range(self.n):
conf = tuple(self.list_conf[i])
f = self.F[i]
mean = f.dot(w)
var = pow( np.exp(f.dot(v)), 2)
dic_mean[conf] = mean
dic_var[conf] = var
return (dic_mean, dic_var)
class LR:
"""
baseline: linear regression
"""
def __init__(self, data, hetero = False):
self.dic_conf_wl = analysis.get_dic_conf_wl(data)
n = len(self.dic_conf_wl)
list_conf = self.dic_conf_wl.keys()
self.F = []
self.empi_mean = []
self.empi_var = []
for conf in list_conf:
f = create_features(conf)
self.F.append(f)
labels = self.dic_conf_wl[conf][1]
self.empi_mean.append( np.mean(labels) )
self.empi_var.append ( np.var(labels) )
self.lr_mean = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr_mean.fit(self.F, self.empi_mean)
self.const_var = np.sum((self.lr_mean.predict(self.F) - self.empi_mean)**2) *1.0/ (n-2)
self.lr_var = sklearn.linear_model.LinearRegression(fit_intercept = False)
#self.lr_var.fit(self.F, self.empi_var )
#self.lr_var.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)))
self.hetero = hetero
def predict(self, list_conf):
"""
predict mean and var of new conf
"""
self.tF = []
for conf in list_conf:
f = create_features(conf)
self.tF.append(f)
res_mean = self.lr_mean.predict(self.tF)
if self.hetero:
res_var = self.lr_var.predict(self.tF)
#res_var = pow( np.exp(self.lr_var.predict(self.tF)), 2)
else:
res_var = [self.const_var] * len(list_conf)
return (res_mean, res_var)
class baseline_spam(model):
"""
baselines for spam detection
"""
def __init__(self, data):
model.__init__(self, data)
#get spam score
self.ss = {}
for w in self.dic_w_il:
self.ss[w] = 0
for i, l in self.dic_w_il[w]:
# difference between label and average label
self.ss[w] += np.abs( l - np.mean(self.L[i][1]) )
self.ss[w] = self.ss[w] * 1.0 / len(self.dic_w_il[w])
#normalize:
max_score = max(self.ss.values())
for w in self.ss:
self.ss[w] = self.ss[w] * 1.0 / max_score
def spam_score(self, workers):
res = []
for w in workers:
res.append(self.ss[w])
return res
empirical_spam = [0.13, 0.25, 0.22, 0.27, 0.14]
def plot_empi_spam():
fig, ax = plt.subplots()
ax.bar(np.asarray([1,2,3,4,5]) - 0.5, empirical_spam)
ax.set_xlabel('Rating')
ax.set_ylabel('Proportion')
ax.set_xticks(np.asarray([1,2,3,4,5]))
class eval():
"""
evaluate
"""
def __init__(self, data, ptrain = 0.6, pval = 0.2, prw = 0.1, prl = 0.8, ptr = 1.0, plt = 1.0, pwk = 0.0, rand_seed = 1234, noise = 'empirical', bad_guys = []):
"""
ptrain = train set
pval = validation set
prw = proportion of random workers (spammers)
prl = proportion of random labels (how often a random worker gives a random label)
ptr = proportion of train conf
plt = proportion of labels for each conf in the train set
pwk = proportion of workers to be removed (remove the ones with high diff)
"""
self.data = copy.deepcopy(data)
self.pwk = pwk
self.del_wk()
self.dic_conf_wl = analysis.get_dic_conf_wl(self.data)
self.list_conf = self.dic_conf_wl.keys()
#self.rs = np.random.RandomState(1)
#self.rs.shuffle(self.list_conf)
self.rs = np.random.RandomState(rand_seed)
self.rs.shuffle(self.list_conf)
self.n = len(self.list_conf)
self.n_train = int(ptrain * self.n) # number of total train conf
self.n_given = int(self.n_train * ptr) # number of train conf given to method
self.train_conf = self.list_conf[:self.n_given]
self.n_val = int(pval * self.n) # number of total validation conf
self.val_conf = self.list_conf[self.n_train:self.n_train+self.n_val]
self.test_conf = self.list_conf[self.n_train+self.n_val:]
# get gold L for test
self.gold_mean = []; self.gold_var = []; self.gold_num = []
for conf in self.test_conf:
labs = self.dic_conf_wl[conf][1]
workers = self.dic_conf_wl[conf][0]
labels = []
for l, w in zip(labs, workers):
if w not in bad_guys:
labels.append(l)
self.gold_mean.append( np.mean(labels) )
self.gold_var.append ( np.var(labels) )
self.gold_num.append( len(labels) )
# also get gold L for train
self.train_mean = []; self.train_var = []; self.train_num = []
for conf in self.train_conf:
labels = self.dic_conf_wl[conf][1]
self.train_mean.append( np.mean(labels) )
self.train_var.append ( np.var(labels) )
self.train_num.append( len(labels) )
# also get gold L for valildataion
self.val_mean = []; self.val_var = []; self.val_num = []
for conf in self.val_conf:
labels = self.dic_conf_wl[conf][1]
self.val_mean.append( np.mean(labels) )
self.val_var.append ( np.var(labels) )
self.val_num.append( len(labels) )
self.plt = plt
self.get_train_data()
#inject noise
train_workers = analysis.get_list_workers(self.train_data)
self.rs.shuffle(train_workers)
self.n_random_workers = int(prw * len(train_workers))
self.random_workers = train_workers[:self.n_random_workers]
self.train_workers = train_workers
self.noise = noise
self.prl = prl
self.inject_noise()
def rand_rating(self):
if self.noise == 'uniform':
return self.rs.randint(1,6)
elif self.noise == 'empirical':
return np.nonzero(self.rs.multinomial(1, empirical_spam))[0][0]+1
else:
raise "unknown noise"
def inject_noise(self):
for i in range(len(self.train_data)):
w = self.train_data[i][0]
if w in self.random_workers:
if np.random.uniform() < self.prl:
self.train_data[i][3] = str(self.rand_rating())
def get_train_data(self):
self.train_data = []
dic_conf_num = {}# conf-> number of crowd labels this conf got
for d in self.data:
conf = analysis.get_conf(d)
if conf in self.train_conf:
if conf not in dic_conf_num: dic_conf_num[conf] = 0
if dic_conf_num[conf] > self.plt * len(self.dic_conf_wl[conf][0]): continue
dic_conf_num[conf] += 1
self.train_data.append(d)
def del_wk(self):
"""
remove workers with high deviation
remove a proportion of self.pwk workers
"""
if self.pwk == 0.0: return
dic_ul = analysis.get_dic_url_labels(self.data)
dic_w, dic_mean = analysis.agreement(self.data, dic_ul)
self.workers = sorted(dic_mean.items(), key = lambda i : abs(i[1]), reverse = False)
nwk = len(self.workers)
keep_workers = list( zip(*self.workers[:int((1-self.pwk) * nwk)])[0]) # list of workers to keep
new_data = []
for i in self.data:
if i[0] in keep_workers:
new_data.append(i)
self.data = new_data
def get_mae(self, a, b):
if len(a) != len(b): raise ValueError("len not equal")
res = 0
for x,y in zip(a,b):
res += np.abs(x-y)
res = res * 1.0 / len(a)
return res
def eval(self, model):
"""
model has beeen trained
model has a predict method
"""
res_mean, res_var = model.predict(self.test_conf)
mae_mean = self.get_mae(res_mean, self.gold_mean)
mae_var = self.get_mae(res_var, self.gold_var)
#print "correlation: ", pearsonr(res_var, self.gold_var)
return [mae_mean, mae_var]
def eval_val(self, model):
"""
model has beeen trained
model has a predict method
evaluate on validation data
"""
res_mean, res_var = model.predict(self.val_conf)
mae_mean = self.get_mae(res_mean, self.val_mean)
mae_var = self.get_mae(res_var, self.val_var)
#print "correlation: ", pearsonr(res_var, self.gold_var)
return [mae_mean, mae_var]
def print_val(self, model):
res_mean, res_var = model.predict(self.val_conf)
mae_var = self.get_mae(res_var, self.val_var)
s = 0
for i,j in zip(res_var, self.val_var):
print i, j, i - j
s += (i-j)
print "s = ", s
def eval_all(self, em_it = 3):
"""
evaluate
"""
# LR
#lr = LR(self.train_data, hetero = True)
lr = LR(self.train_data, hetero = False)
eval_lr = self.eval(lr)
# nospam model
#ns = model_nospam(self.train_data)
#ns.init_em()
#ns.em(em_it)
#eval_ns = self.eval(ns)
# model
#new99 = model(self.train_data)
#new99.init_em(0.99)
#new99.em(em_it)
#eval_new99 = self.eval(new99)
new8 = model(self.train_data)
#new8.init_em(0.99)
new8.init_em(1)
new8.em(1,1)
eval_new8 = self.eval(new8)
# fix bias model
#fb = model_fixbias(self.train_data)
#fb.init_em()
#fb.em(em_it)
#eval_fb = self.eval(fb)
# variational model
var82 = model_var(self.train_data, 9.9, 0.1)
var82.init_em()
#var82.e_step()
var82.em(1,1)
eval_var82 = self.eval(var82)
#var191 = model_var(self.train_data, 19, 1)
#var191.init_em()
#var191.em(em_it)
#eval_var191 = self.eval(var191)
# spamer score
ss_baseline = self.detect_spammer(baseline_spam(self.train_data))
ss_new = self.detect_spammer(new8)
ss_var82 = self.detect_spammer(var82)
print "linear reg/baseline:", eval_lr, ss_baseline
#print "no spam model:", eval_ns
print "new model:", eval_new8, ss_new
#print "new model(fixbias)", eval_fb
print "var model", eval_var82, ss_var82
#return ([eval_lr, eval_new99, eval_new9, eval_ns, eval_fb, eval_var91, eval_var191], ss_baseline, ss_new)
#return ([eval_lr, eval_new8], ss_baseline, ss_new)
return ([eval_lr, eval_new8, eval_var82], ss_baseline, ss_new, ss_var82)
def detect_spammer(self, model):
"""
return AUC of the model in detecting the spammers.
model has a method spam_score(list_workers) that returns the prob of being a spammer
"""
# in self.train_workers, the first n_random_workers
if self.n_random_workers == 0:
return -1
score = model.spam_score(self.train_workers)
y = [1] * self.n_random_workers + [0] * (len(self.train_workers) - self.n_random_workers)
return roc_auc_score(y, score)
class model_constvar(model):
"""
same model with constant variance
"""
def __init__(self, data):
model.__init__(self,data)
self.std = 1
def get_std(self, i):
return self.std
def expected_ll(self, w):
"""
return expected log likelihood
"""
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
theta = self.theta[worker]
ll0 = np.log(self.spam_dist(l)) + np.log(1-theta)
mean = self.F[i].dot(w)
std = self.std
if std < self.ep: std = self.ep
ll1 = scipy.stats.norm.logpdf(l, loc = mean, scale = std ) + np.log(theta)
res += pt0*ll0 + pt1*ll1
return res
def grad_expected_ll(self, w):
gw = np.zeros( (self.m,) )
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w)
sigma = self.std
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
return gw
def m_step_var(self):
s1 = 0
s2 = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtx = self.F[i].dot(self.w)
s1 += pt1*pow(l-wtx,2)
s2 += pt1
self.var = s1*1.0/s2
def m_step(self):
"""
maximize theta, W and var
"""
self.m_step_theta()
# maximize W
m = self.m
f = lambda x: -self.expected_ll(x)
fp = lambda x: -self.grad_expected_ll(x)
x0 = self.w
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
print res
self.w = res.x
# maximize var
self.m_step_var()
class model_var(model):
"""
theta is a hidden variable;
inference by mean-field variational approximation
"""
def __init__(self, data, A = 8.0, B = 2.0):
model.__init__(self, data)
self.A = A
self.B = B
def init_em(self):
"""
init params: w and v
init variational params alpha, beta, gamma
"""
self.s = 2
model.init_wv(self)
self.alpha = {}
self.beta = {}
for w in self.dic_w_il:
self.alpha[w] = self.A
self.beta[w] = self.B
self.gamma = []
for i in range(self.n):
workers, labels = self.L[i]
self.gamma.append([])
for w, l in zip(workers, labels):
self.gamma[i].append( self.A*1.0/(self.A+self.B) )
def update(self, a, b):
self.n_update += 1
self.change += np.abs(a-b)
def e_step(self, max_it = 10):
for it in range(max_it):
self.change = 0; self.n_update = 0
# update q(Z)
for i in range(self.n):
workers, labels = self.L[i]
for w, l, j in zip(workers, labels, range(len(workers))):
alpha = self.alpha[w]
beta = self.beta[w]
z0 = (self.spam_dist(l)) * np.exp( digamma(beta) - digamma(alpha+beta) )
z1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i), scale = self.get_std(i) ) * np.exp( digamma(alpha) - digamma(alpha+beta) )
g = z1*1.0 / (z0+z1)
self.update(self.gamma[i][j], g)
self.gamma[i][j] = g
# update q(theta)
new_alpha = {}
new_beta = {}
for w in self.dic_w_il:
new_alpha[w] = self.A
new_beta[w] = self.B
for i in range(self.n):
workers, labels = self.L[i]
for w, l, g in zip(workers, labels, self.gamma[i]):
new_alpha[w] += (1-g)
new_beta[w] += g
for w in self.dic_w_il:
self.update(self.alpha[w], new_alpha[w])
self.alpha[w] = new_alpha[w]
self.update(self.beta[w], new_beta[w])
self.beta[w] = new_beta[w]
#
avg_change = self.change * 1.0/self.n_update
if avg_change < 0.01:break
def m_step(self, update_v = True):
self.pt = self.gamma
model.m_step_wv(self, update_v)
def spam_score(self, workers):
"""
return prob of being a spammer
"""
res = []
for w in workers:
a = self.alpha[w]- self.A; b = self.beta[w] - self.B
res.append( a * 1.0/(a+b) )
return res
class model_nospam(model):
def __init__(self, data):
model.__init__(self, data)
def e_step(self):
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
self.pt[i].append(1.0)
class test:
def __init__(self, data, n_train = 1):
self.data = copy.deepcopy(data)
self.reduce()
self.dic_conf_wl = analysis.get_dic_conf_wl(self.data)
self.list_conf = self.dic_conf_wl.keys()
#self.rs = np.random.RandomState(rand_seed)
#self.rs.shuffle(self.list_conf)
self.n = len(self.list_conf)
self.n_train = n_train
self.train_conf = self.list_conf[:self.n_train]
self.test_conf = self.list_conf[self.n_train:]
# get gold L for test
self.gold_mean = []; self.gold_var = []; self.gold_num = []
for conf in self.test_conf:
labels = self.dic_conf_wl[conf][1]
self.gold_mean.append( np.mean(labels) )
self.gold_var.append ( np.var(labels) )
self.gold_num.append( len(labels) )
# also get gold L for train
self.train_mean = []; self.train_var = []; self.train_num = []
for conf in self.train_conf:
labels = self.dic_conf_wl[conf][1]
self.train_mean.append( np.mean(labels) )
self.train_var.append ( np.var(labels) )
self.train_num.append( len(labels) )
self.get_train_data()
def reduce(self):
"""
delete labels so that each conf has roughly the same number of labels
"""
dic_conf_wl = analysis.get_dic_conf_wl(self.data)
dic_conf_num = {}
for conf in dic_conf_wl.keys():
labels = dic_conf_wl[conf][1]
dic_conf_num[conf] = len(labels)
new_data = []
for d in self.data:
conf = analysis.get_conf(d)
if dic_conf_num[conf] > 50:
dic_conf_num[conf] -= 1
else:
new_data.append(d)
self.data = new_data
def get_train_data(self):
self.train_data = []
for d in self.data:
conf = analysis.get_conf(d)
if conf in self.train_conf:
self.train_data.append(d)
def run(self, model, n_it = 1):
self.M = model(self.train_data)
self.M.init_em()
x = self.M.predict(self.train_conf)
self.M.em(n_it)
x1 = self.M.predict(self.train_conf)
self.print_res(self.train_var, x, x1)
def print_res(self, gold, x, x1):
sum_x = 0
sum_x1 = 0
for i in range(len(gold)):
#print i, gold[i], x[1][i], x1[1][i]
sum_x += x[1][i] - gold[i]
sum_x1 += x1[1][i] - gold[i]
e0 = eval([])
print 'mae x = ' , e0.get_mae(gold, x[1])
print 'mae x1 = ', e0.get_mae(gold, x1[1])
print 'sum x = ', sum_x, ' sum x1 = ', sum_x1
class model_fixbias(model):
def m_step_wv(self):
self.wm = []
self.wv = []
for i in range(self.n):
workers, labels = self.L[i]
m = len(labels)
s = 0
sw = 0
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pt1 * l
sw += pt1
wmean = s * 1.0 / sw
self.wm.append(wmean)
s = 0
for w, l, pt1 in zip(workers, labels, self.pt[i]):
s += pt1 * pow(l - wmean, 2)
wvar = s * 1.0 / sw
self.wv.append(wvar)
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.wm)
self.w = self.lr.coef_
self.lr.fit(self.F, np.log( pow(np.asarray(self.wv), 0.5)) )
self.v = self.lr.coef_
class model_vf(model_var, model_fixbias):
def m_step(self):
self.pt = self.gamma
model_fixbias.m_step_wv(self)
class model_stoch(model):
"""
using stochastic gradient descent to optimize params
"""
def __init__(self, data, lr_w = 0.001, lr_v = 0.001):
model.__init__(self, data)
self.lr_w = lr_w
self.lr_v = lr_v
def m_step_wv(self):
"""
maximize w and v using SGD
"""
for it in range(50):
for i in range(self.n):
gw = np.zeros( (self.m,) )
gv = np.zeros( (self.m,) )
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(self.w)
sigma = np.exp(self.F[i].dot(self.v))
if sigma < self.ep: sigma = self.ep
update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
gw += update_w
update_v = pt1*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
gv += update_v
self.w = self.w + self.lr_w * gw
self.v = self.v + self.lr_v * gv
#for i in range(self.m-1):
# gw[i] -= 2 * self.lambda_w * w[i]
# gv[i] -= 2 * self.lambda_v * v[i]
class model_school(model):
def __init__(self, data):
model.__init__(self, data)
def get_mean(self, i, k):
return self.F[i].dot(self.w[k])
def get_std(self, i, k):
return np.exp(self.F[i].dot(self.v[k]))
def get_var(self, i, k):
return pow(self.get_std(i,k), 2)
def e_step(self):
"""
evaluate posterior over Z
"""
self.pt = []
for i in range(self.n):
self.pt.append([])
workers, labels = self.L[i]
for w, l in zip(workers, labels):
p1 = scipy.stats.norm.pdf(l, loc = self.get_mean(i, 1), scale = self.get_std(i, 1) ) * self.theta[w]
p0 = scipy.stats.norm.pdf(l, loc = self.get_mean(i, 0), scale = self.get_std(i, 0) ) * (1-self.theta[w])
p = p1 *1.0/ (p0 + p1)
self.pt[i].append(p)
def expected_ll(self, x):
"""
return expected log likelihood
"""
l = len(x)/2
w = x[:l].reshape((2, self.m))
v = x[l:].reshape((2, self.m))
res = 0
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
pt0 = 1 - pt1
#theta = self.theta[worker]
ll = [0,0]
for k in [0,1]:
mean = self.F[i].dot(w[k])
std = np.exp(self.F[i].dot(v[k]))
if std < self.ep: std = self.ep
ll[k] = scipy.stats.norm.logpdf(l, loc = mean, scale = std )# + np.log(theta)
res += pt0*ll[0] + pt1*ll[1]
#regularization
#for i in range(self.m-1):
# res -= self.lambda_w * w[i]*w[i] + self.lambda_v * v[i]*v[i]
return res
def grad_expected_ll(self, x):
l = len(x)/2
w = x[:l].reshape((2, self.m))
v = x[l:].reshape((2, self.m))
gw = np.zeros( (2, self.m) )
gv = np.zeros( (2, self.m) )
for k in [0,1]:
for i in range(self.n):
workers, labels = self.L[i]
for worker, l, pt1 in zip(workers, labels, self.pt[i]):
wtc = self.F[i].dot(w[k])
sigma = np.exp(self.F[i].dot(v[k]))
if sigma < self.ep: sigma = self.ep
pt = pt1 if (k==1) else 1 - pt1
update_w = pt*(l-wtc)/pow(sigma,2)*self.F[i]
gw[k] += update_w[k]
update_v = pt*(-self.F[i] + pow(l-wtc,2)/pow(sigma,2)*self.F[i])
gv[k] += update_v[k]
# regularization
#for i in range(self.m-1):
# gw[i] -= 2 * self.lambda_w * w[i]
# gv[i] -= 2 * self.lambda_v * v[i]
return np.hstack( (gw[0,:], gw[1,:], gv[0,:], gv[1,:]) )
def m_step_wv(self):
"""
maximize w and v
"""
m = self.m
f = lambda x: -self.expected_ll(x)
fp = lambda x: -self.grad_expected_ll(x)
x0 = np.hstack( (self.w.reshape((2*m,)), self.v.reshape((2*m,))) )
#opt_method = 'Nelder-Mead'
opt_method = 'BFGS'
res = scipy.optimize.minimize(f, x0, method=opt_method, jac=fp)
#print res
x = res.x
l = len(x)/2
self.w = x[:l].reshape((2, self.m))
self.v = x[l:].reshape((2, self.m))
def m_step(self):
"""
maximize expected ll of w, v, theta
"""
self.m_step_theta()
self.m_step_wv()
def init_wv(self):
"""
init the params w and v
using the results of linear regression on the empirical means and variances
"""
self.lr = sklearn.linear_model.LinearRegression(fit_intercept = False)
self.lr.fit(self.F, self.empi_mean)
self.w[0] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.w[1] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.lr.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)) )
self.v[0] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
self.v[1] = self.lr.coef_ + self.rs.normal(0, 0.1, self.m)
def init_em(self, rseed = 1):
"""
init w, v, theta
"""
self.rs = np.random.RandomState(rseed)
self.w = np.zeros((2, self.m))
self.v = np.zeros((2, self.m))
self.theta = {}
for w in self.dic_w_il:
self.theta[w] = self.rs.rand()
self.init_wv()
def em(self, n_it = 3):
"""
"""
# iterate
for it in range(n_it):
self.e_step()
self.m_step()
| mit |
miptliot/edx-platform | lms/djangoapps/course_api/blocks/serializers.py | 13 | 3789 | """
Serializers for Course Blocks related return objects.
"""
from django.conf import settings
from rest_framework import serializers
from rest_framework.reverse import reverse
from .transformers import SUPPORTED_FIELDS
class BlockSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Serializer for single course block
"""
def _get_field(self, block_key, transformer, field_name, default):
"""
Get the field value requested. The field may be an XBlock field, a
transformer block field, or an entire transformer block data dict.
"""
value = None
if transformer is None:
value = self.context['block_structure'].get_xblock_field(block_key, field_name)
elif field_name is None:
try:
value = self.context['block_structure'].get_transformer_block_data(block_key, transformer).fields
except KeyError:
pass
else:
value = self.context['block_structure'].get_transformer_block_field(block_key, transformer, field_name)
return value if (value is not None) else default
def to_representation(self, block_key):
"""
Return a serializable representation of the requested block
"""
# create response data dict for basic fields
data = {
'id': unicode(block_key),
'block_id': unicode(block_key.block_id),
'lms_web_url': reverse(
'jump_to',
kwargs={'course_id': unicode(block_key.course_key), 'location': unicode(block_key)},
request=self.context['request'],
),
'student_view_url': reverse(
'courseware.views.views.render_xblock',
kwargs={'usage_key_string': unicode(block_key)},
request=self.context['request'],
),
}
if settings.FEATURES.get("ENABLE_LTI_PROVIDER") and 'lti_url' in self.context['requested_fields']:
data['lti_url'] = reverse(
'lti_provider_launch',
kwargs={'course_id': unicode(block_key.course_key), 'usage_id': unicode(block_key)},
request=self.context['request'],
)
# add additional requested fields that are supported by the various transformers
for supported_field in SUPPORTED_FIELDS:
if supported_field.requested_field_name in self.context['requested_fields']:
field_value = self._get_field(
block_key,
supported_field.transformer,
supported_field.block_field_name,
supported_field.default_value,
)
if field_value is not None:
# only return fields that have data
data[supported_field.serializer_field_name] = field_value
if 'children' in self.context['requested_fields']:
children = self.context['block_structure'].get_children(block_key)
if children:
data['children'] = [unicode(child) for child in children]
return data
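def _example_serialize_block(block_key, block_structure, request):
    """
    Illustrative sketch, not part of the original module: serialize a single
    block with the minimal context BlockSerializer expects. The caller
    supplies the block usage key, the transformed block structure, and the
    current request; 'children' is requested as an extra field.
    """
    context = {
        'block_structure': block_structure,
        'request': request,
        'requested_fields': ['children'],
    }
    return BlockSerializer(block_key, context=context).data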
class BlockDictSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Serializer that formats a BlockStructure object to a dictionary, rather
than a list, of blocks
"""
root = serializers.CharField(source='root_block_usage_key')
blocks = serializers.SerializerMethodField()
def get_blocks(self, structure):
"""
Serialize to a dictionary of blocks keyed by the block's usage_key.
"""
return {
unicode(block_key): BlockSerializer(block_key, context=self.context).data
for block_key in structure
}
| agpl-3.0 |
ljjt/azure-sdk-for-python | azure/storage/blobservice.py | 1 | 109280 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
WindowsAzureError,
BLOB_SERVICE_HOST_BASE,
DEV_BLOB_HOST,
_ERROR_VALUE_NEGATIVE,
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
_convert_class_to_xml,
_dont_fail_not_exist,
_dont_fail_on_exist,
_encode_base64,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_parse_enum_results_list,
_parse_response,
_parse_response_for_dict,
_parse_response_for_dict_filter,
_parse_response_for_dict_prefix,
_parse_simple_list,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_type_bytes,
_validate_not_none,
)
from azure.http import HTTPRequest
from azure.storage import (
Container,
ContainerEnumResults,
PageList,
PageRange,
SignedIdentifiers,
StorageServiceProperties,
_convert_block_list_to_xml,
_convert_response_to_block_list,
_create_blob_result,
_parse_blob_enum_results_list,
_update_storage_blob_header,
)
from azure.storage.storageclient import _StorageClient
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
_PAGE_SIZE = 512
class BlobService(_StorageClient):
'''
This is the main class managing Blob resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST):
'''
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
protocol: Optional. Protocol. Defaults to https.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host: Optional. Dev host url. Defaults to localhost.
'''
self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024
self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024
super(BlobService, self).__init__(
account_name, account_key, protocol, host_base, dev_host)
def make_blob_url(self, container_name, blob_name, account_name=None,
protocol=None, host_base=None):
'''
Creates the url to access a blob.
container_name: Name of container.
blob_name: Name of blob.
account_name:
Name of the storage account. If not specified, uses the account
specified when BlobService was initialized.
protocol:
Protocol to use: 'http' or 'https'. If not specified, uses the
protocol specified when BlobService was initialized.
host_base:
Live host base url. If not specified, uses the host base specified
when BlobService was initialized.
'''
if not account_name:
account_name = self.account_name
if not protocol:
protocol = self.protocol
if not host_base:
host_base = self.host_base
return '{0}://{1}{2}/{3}/{4}'.format(protocol,
account_name,
host_base,
container_name,
blob_name)
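    def _example_blob_url(self):
        '''
        Illustrative sketch, not part of the original SDK: with an account
        named 'myaccount' and the default protocol and host base, this
        typically resolves to something like
        https://myaccount.blob.core.windows.net/mycontainer/myblob
        ('mycontainer' and 'myblob' are placeholder names).
        '''
        return self.make_blob_url('mycontainer', 'myblob')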
def list_containers(self, prefix=None, marker=None, maxresults=None,
include=None):
'''
The List Containers operation returns a list of the containers under
the specified account.
prefix:
Optional. Filters the results to return only containers whose names
begin with the specified prefix.
marker:
Optional. A string value that identifies the portion of the list to
be returned with the next list operation.
maxresults:
Optional. Specifies the maximum number of containers to return.
include:
Optional. Include this parameter to specify that the container's
            metadata be returned as part of the response body. Set this
parameter to string 'metadata' to get container's metadata.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(response,
ContainerEnumResults,
"Containers",
Container)
def create_container(self, container_name, x_ms_meta_name_values=None,
x_ms_blob_public_access=None, fail_on_exist=False):
'''
Creates a new container under the specified account. If the container
with the same name already exists, the operation fails.
container_name: Name of container to create.
x_ms_meta_name_values:
Optional. A dict with name_value pairs to associate with the
container as metadata. Example:{'Category':'test'}
x_ms_blob_public_access:
Optional. Possible values include: container, blob
fail_on_exist:
specify whether to throw an exception when the container exists.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
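    def _example_create_public_container(self):
        '''
        Illustrative sketch, not part of the original SDK: create a container
        whose blobs are publicly readable, returning False instead of raising
        if it already exists. 'mycontainer' is a placeholder name.
        '''
        return self.create_container('mycontainer',
                                     x_ms_blob_public_access='blob',
                                     fail_on_exist=False)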
def get_container_properties(self, container_name, x_ms_lease_id=None):
'''
Returns all user-defined metadata and system properties for the
specified container.
container_name: Name of existing container.
x_ms_lease_id:
If specified, get_container_properties only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def get_container_metadata(self, container_name, x_ms_lease_id=None):
'''
Returns all user-defined metadata for the specified container. The
metadata will be in returned dictionary['x-ms-meta-(name)'].
container_name: Name of existing container.
x_ms_lease_id:
If specified, get_container_metadata only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=metadata'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
def set_container_metadata(self, container_name,
x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Sets one or more user-defined name-value pairs for the specified
container.
container_name: Name of existing container.
x_ms_meta_name_values:
A dict containing name, value for metadata.
Example: {'category':'test'}
x_ms_lease_id:
If specified, set_container_metadata only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_container_acl(self, container_name, x_ms_lease_id=None):
'''
Gets the permissions for the specified container.
container_name: Name of existing container.
x_ms_lease_id:
If specified, get_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=acl'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, SignedIdentifiers)
def set_container_acl(self, container_name, signed_identifiers=None,
x_ms_blob_public_access=None, x_ms_lease_id=None):
'''
Sets the permissions for the specified container.
container_name: Name of existing container.
        signed_identifiers: SignedIdentifiers instance
x_ms_blob_public_access:
Optional. Possible values include: container, blob
x_ms_lease_id:
If specified, set_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=acl'
request.headers = [
('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
request.body = _get_request_body(
_convert_class_to_xml(signed_identifiers))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def delete_container(self, container_name, fail_not_exist=False,
x_ms_lease_id=None):
'''
Marks the specified container for deletion.
container_name: Name of container to delete.
fail_not_exist:
Specify whether to throw an exception when the container doesn't
exist.
x_ms_lease_id: Required if the container has an active lease.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def lease_container(self, container_name, x_ms_lease_action,
x_ms_lease_id=None, x_ms_lease_duration=60,
x_ms_lease_break_period=None,
x_ms_proposed_lease_id=None):
'''
Establishes and manages a lock on a container for delete operations.
The lock duration can be 15 to 60 seconds, or can be infinite.
container_name: Name of existing container.
x_ms_lease_action:
Required. Possible values: acquire|renew|release|break|change
x_ms_lease_id: Required if the container has an active lease.
x_ms_lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. For backwards compatibility, the default is
60, and the value is only used on an acquire operation.
x_ms_lease_break_period:
Optional. For a break operation, this is the proposed duration of
seconds that the lease should continue before it is broken, between
0 and 60 seconds. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining
on the lease is used. A new lease will not be available before the
break period has expired, but the lease may be held for longer than
the break period. If this header does not appear with a break
operation, a fixed-duration lease breaks after the remaining lease
period elapses, and an infinite lease breaks immediately.
x_ms_proposed_lease_id:
Optional for acquire, required for change. Proposed lease ID, in a
GUID string format.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-lease-action', _str_or_none(x_ms_lease_action)),
('x-ms-lease-duration',
_str_or_none(
x_ms_lease_duration if x_ms_lease_action == 'acquire'\
else None)),
('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),
('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-lease-id', 'x-ms-lease-time'])
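    def _example_lease_and_delete(self, container_name):
        '''
        Illustrative sketch, not part of the original SDK: acquire a short
        lease on a container and pass the returned lease id to
        delete_container, as required when the container is leased.
        '''
        lease = self.lease_container(container_name, 'acquire',
                                     x_ms_lease_duration=15)
        self.delete_container(container_name,
                              x_ms_lease_id=lease['x-ms-lease-id'])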
def list_blobs(self, container_name, prefix=None, marker=None,
maxresults=None, include=None, delimiter=None):
'''
Returns the list of blobs under the specified container.
container_name: Name of existing container.
prefix:
Optional. Filters the results to return only blobs whose names
begin with the specified prefix.
marker:
Optional. A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
maxresults:
Optional. Specifies the maximum number of blobs to return,
including all BlobPrefix elements. If the request does not specify
maxresults or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting maxresults to a value less than
or equal to zero results in error response code 400 (Bad Request).
include:
Optional. Specifies one or more datasets to include in the
response. To specify more than one of these options on the URI,
you must separate each option with a comma. Valid values are:
snapshots:
Specifies that snapshots should be included in the
enumeration. Snapshots are listed from oldest to newest in
the response.
metadata:
Specifies that blob metadata be returned in the response.
uncommittedblobs:
Specifies that blobs for which blocks have been uploaded,
but which have not been committed using Put Block List
(REST API), be included in the response.
copy:
Version 2012-02-12 and newer. Specifies that metadata
related to any current or previous Copy Blob operation
should be included in the response.
delimiter:
Optional. When the request includes this parameter, the operation
returns a BlobPrefix element in the response body that acts as a
placeholder for all blobs whose names begin with the same
substring up to the appearance of the delimiter character. The
delimiter may be a single character or a string.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('delimiter', _str_or_none(delimiter)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_blob_enum_results_list(response)
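    def _example_list_log_blobs(self):
        '''
        Illustrative sketch, not part of the original SDK: list only the blobs
        under a virtual 'logs/' folder by combining prefix and delimiter.
        'mycontainer' is a placeholder name; for large containers the marker
        value described above can be used to page through results.
        '''
        return self.list_blobs('mycontainer', prefix='logs/', delimiter='/')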
def set_blob_service_properties(self, storage_service_properties,
timeout=None):
'''
Sets the properties of a storage account's Blob service, including
Windows Azure Storage Analytics. You can also use this operation to
set the default request version for all incoming requests that do not
have a version specified.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties',
storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(
_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_blob_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Blob service, including
Windows Azure Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def get_blob_properties(self, container_name, blob_name,
x_ms_lease_id=None):
'''
Returns all user-defined metadata, standard HTTP properties, and
system properties for the blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'HEAD'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def set_blob_properties(self, container_name, blob_name,
x_ms_blob_cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_md5=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_lease_id=None):
'''
Sets system properties on the blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_blob_cache_control:
Optional. Modifies the cache control string for the blob.
x_ms_blob_content_type: Optional. Sets the blob's content type.
x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
x_ms_blob_content_language: Optional. Sets the blob's content language.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=properties'
request.headers = [
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
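    def _example_set_content_type(self):
        '''
        Illustrative sketch, not part of the original SDK: mark an existing
        blob as JSON and let clients cache it for an hour. Placeholder
        container and blob names.
        '''
        self.set_blob_properties('mycontainer', 'data.json',
                                 x_ms_blob_cache_control='max-age=3600',
                                 x_ms_blob_content_type='application/json')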
def put_blob(self, container_name, blob_name, blob, x_ms_blob_type,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None, x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None, x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None, x_ms_meta_name_values=None,
x_ms_lease_id=None, x_ms_blob_content_length=None,
x_ms_blob_sequence_number=None):
'''
Creates a new block blob or page blob, or updates the content of an
existing block blob.
See put_block_blob_from_* and put_page_blob_from_* for high level
functions that handle the creation and upload of large blobs with
automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
blob:
For BlockBlob:
Content of blob as bytes (size < 64MB). For larger size, you
must call put_block and put_block_list to set content of blob.
For PageBlob:
Use None and call put_page to set content of blob.
x_ms_blob_type: Required. Could be BlockBlob or PageBlob.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_content_length:
Required for page blobs. This header specifies the maximum size
for the page blob, up to 1 TB. The page blob size must be aligned
to a 512-byte boundary.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_blob_type', x_ms_blob_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-blob-type', _str_or_none(x_ms_blob_type)),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-blob-content-length',
_str_or_none(x_ms_blob_content_length)),
('x-ms-blob-sequence-number',
_str_or_none(x_ms_blob_sequence_number))
]
request.body = _get_request_body_bytes_only('blob', blob)
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
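    def _example_put_small_blobs(self):
        '''
        Illustrative sketch, not part of the original SDK: create a small
        block blob directly, and an empty 1 KB page blob whose pages can be
        filled later with put_page. The page blob size follows the 512-byte
        alignment rule described above. Placeholder container and blob names.
        '''
        self.put_blob('mycontainer', 'hello.txt', b'hello world', 'BlockBlob')
        self.put_blob('mycontainer', 'disk.bin', b'', 'PageBlob',
                      x_ms_blob_content_length=1024)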
def put_block_blob_from_path(self, container_name, blob_name, file_path,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from a file path, or updates the content of an
existing block blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
file_path: Path of the file to upload as the blob content.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-blob-type', 'BlockBlob'),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
self.put_block_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
def put_block_blob_from_file(self, container_name, blob_name, stream,
count=None, content_encoding=None,
content_language=None, content_md5=None,
cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from a file/stream, or updates the content of
an existing block blob, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
stream: Opened file/stream to upload as the blob content.
count:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-blob-type', 'BlockBlob'),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
if count and count < self._BLOB_MAX_DATA_SIZE:
if progress_callback:
progress_callback(0, count)
data = stream.read(count)
self.put_blob(container_name,
blob_name,
data,
'BlockBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id)
if progress_callback:
progress_callback(count, count)
else:
if progress_callback:
progress_callback(0, count)
self.put_blob(container_name,
blob_name,
None,
'BlockBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id)
remain_bytes = count
block_ids = []
block_index = 0
index = 0
while True:
request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\
if remain_bytes is None else min(
remain_bytes,
self._BLOB_MAX_CHUNK_DATA_SIZE)
data = stream.read(request_count)
if data:
length = len(data)
index += length
remain_bytes = remain_bytes - \
length if remain_bytes else None
block_id = '{0:08d}'.format(block_index)
self.put_block(container_name, blob_name,
data, block_id, x_ms_lease_id=x_ms_lease_id)
block_ids.append(block_id)
block_index += 1
if progress_callback:
progress_callback(index, count)
else:
break
self.put_block_list(container_name, blob_name, block_ids)
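    def _example_chunked_upload(self, file_path):
        '''
        Illustrative sketch, not part of the original SDK: stream a local file
        as a block blob and report chunk-by-chunk progress. Files larger than
        the single-request limit are split into put_block/put_block_list calls
        by the method above. Placeholder container and blob names.
        '''
        def report(current, total):
            print('uploaded {0} of {1} bytes'.format(current, total))
        with open(file_path, 'rb') as stream:
            self.put_block_blob_from_file('mycontainer', 'bigblob', stream,
                                          count=path.getsize(file_path),
                                          progress_callback=report)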
def put_block_blob_from_bytes(self, container_name, blob_name, blob,
index=0, count=None, content_encoding=None,
content_language=None, content_md5=None,
cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from an array of bytes, or updates the content
of an existing block blob, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
blob: Content of blob as an array of bytes.
index: Start index in the array of bytes.
count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-blob-type', 'BlockBlob'),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
if index < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
if count < self._BLOB_MAX_DATA_SIZE:
if progress_callback:
progress_callback(0, count)
data = blob[index: index + count]
self.put_blob(container_name,
blob_name,
data,
'BlockBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id)
if progress_callback:
progress_callback(count, count)
else:
stream = BytesIO(blob)
stream.seek(index)
self.put_block_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
def put_block_blob_from_text(self, container_name, blob_name, text,
text_encoding='utf-8',
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from str/unicode, or updates the content of an
existing block blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
text: Text to upload to the blob.
text_encoding: Encoding to use to convert the text to bytes.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-blob-type', 'BlockBlob'),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
if not isinstance(text, bytes):
_validate_not_none('text_encoding', text_encoding)
text = text.encode(text_encoding)
self.put_block_blob_from_bytes(container_name,
blob_name,
text,
0,
len(text),
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
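    def _example_text_round_trip(self):
        '''
        Illustrative sketch, not part of the original SDK: upload unicode text
        and read it back, relying on the default utf-8 text encoding on both
        sides. Placeholder container and blob names.
        '''
        self.put_block_blob_from_text('mycontainer', 'greeting.txt',
                                      u'hello world')
        return self.get_blob_to_text('mycontainer', 'greeting.txt')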
def put_page_blob_from_path(self, container_name, blob_name, file_path,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from a file path, or updates the content of an
existing page blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
file_path: Path of the file to upload as the blob content.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
self.put_page_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
x_ms_blob_sequence_number,
progress_callback)
def put_page_blob_from_file(self, container_name, blob_name, stream, count,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from a file/stream, or updates the content of an
existing page blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
stream: Opened file/stream to upload as the blob content.
count:
            Number of bytes to read from the stream. This is required; a page
blob cannot be created if the count is unknown.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_not_none('count', count)
if count < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
if count % _PAGE_SIZE != 0:
raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
if progress_callback:
progress_callback(0, count)
self.put_blob(container_name,
blob_name,
b'',
'PageBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
count,
x_ms_blob_sequence_number)
remain_bytes = count
page_start = 0
while True:
request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE)
data = stream.read(request_count)
if data:
length = len(data)
remain_bytes = remain_bytes - length
page_end = page_start + length - 1
self.put_page(container_name,
blob_name,
data,
'bytes={0}-{1}'.format(page_start, page_end),
'update',
x_ms_lease_id=x_ms_lease_id)
page_start = page_start + length
if progress_callback:
progress_callback(page_start, count)
else:
break
def put_page_blob_from_bytes(self, container_name, blob_name, blob,
index=0, count=None, content_encoding=None,
content_language=None, content_md5=None,
cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from an array of bytes, or updates the content
of an existing page blob, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
blob: Content of blob as an array of bytes.
index: Start index in the array of bytes.
count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_type_bytes('blob', blob)
if index < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
self.put_page_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
x_ms_blob_sequence_number,
progress_callback)
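    def _example_page_blob_upload(self):
        '''
        Illustrative sketch, not part of the original SDK: page blob sizes
        must be multiples of 512 bytes (_PAGE_SIZE), so pad the payload to a
        whole number of pages before uploading. Placeholder container and
        blob names.
        '''
        payload = b'hello pages'
        padding = (_PAGE_SIZE - len(payload) % _PAGE_SIZE) % _PAGE_SIZE
        self.put_page_blob_from_bytes('mycontainer', 'pages.bin',
                                      payload + b'\x00' * padding)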
def get_blob(self, container_name, blob_name, snapshot=None,
x_ms_range=None, x_ms_lease_id=None,
x_ms_range_get_content_md5=None):
'''
Reads or downloads a blob from the system, including its metadata and
properties.
See get_blob_to_* for high level functions that handle the download
of large blobs with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_range:
Optional. Return only the bytes of the blob in the specified range.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_range_get_content_md5:
Optional. When this header is set to true and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-range-get-content-md5',
_str_or_none(x_ms_range_get_content_md5))
]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request, None)
return _create_blob_result(response)
def get_blob_to_path(self, container_name, blob_name, file_path,
open_mode='wb', snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob to a file path, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
file_path: Path of file to write to.
open_mode: Mode to use when opening the file.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
with open(file_path, open_mode) as stream:
self.get_blob_to_file(container_name,
blob_name,
stream,
snapshot,
x_ms_lease_id,
progress_callback)
def get_blob_to_file(self, container_name, blob_name, stream,
snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob to a file/stream, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
stream: Opened file/stream to write to.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
props = self.get_blob_properties(container_name, blob_name)
blob_size = int(props['content-length'])
if blob_size < self._BLOB_MAX_DATA_SIZE:
if progress_callback:
progress_callback(0, blob_size)
data = self.get_blob(container_name,
blob_name,
snapshot,
x_ms_lease_id=x_ms_lease_id)
stream.write(data)
if progress_callback:
progress_callback(blob_size, blob_size)
else:
if progress_callback:
progress_callback(0, blob_size)
index = 0
while index < blob_size:
chunk_range = 'bytes={}-{}'.format(
index,
index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1)
data = self.get_blob(
container_name, blob_name, x_ms_range=chunk_range)
length = len(data)
index += length
if length > 0:
stream.write(data)
if progress_callback:
progress_callback(index, blob_size)
if length < self._BLOB_MAX_CHUNK_DATA_SIZE:
break
else:
break
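    # Illustrative usage sketch (editorial addition): download a blob to a
    # local file and report progress; assumes `blob_service` is a configured
    # instance of this service class.
    #
    #     def report_progress(current, total):
    #         print('{0} of {1} bytes downloaded'.format(current, total))
    #
    #     blob_service.get_blob_to_path(
    #         'mycontainer', 'myblob', '/tmp/myblob.bin',
    #         progress_callback=report_progress)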
def get_blob_to_bytes(self, container_name, blob_name, snapshot=None,
x_ms_lease_id=None, progress_callback=None):
'''
Downloads a blob as an array of bytes, with automatic chunking and
progress notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
stream = BytesIO()
self.get_blob_to_file(container_name,
blob_name,
stream,
snapshot,
x_ms_lease_id,
progress_callback)
return stream.getvalue()
def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8',
snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob as unicode text, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
text_encoding: Encoding to use when decoding the blob data.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text_encoding', text_encoding)
result = self.get_blob_to_bytes(container_name,
blob_name,
snapshot,
x_ms_lease_id,
progress_callback)
return result.decode(text_encoding)
def get_blob_metadata(self, container_name, blob_name, snapshot=None,
x_ms_lease_id=None):
'''
Returns all user-defined metadata for the specified blob or snapshot.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=metadata'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
def set_blob_metadata(self, container_name, blob_name,
x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Sets user-defined metadata for the specified blob as one or more
name-value pairs.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_meta_name_values: Dict containing name and value pairs.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
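    # Illustrative usage sketch (editorial addition): set user-defined
    # metadata and read it back; assumes `blob_service` is a configured
    # instance of this service class.
    #
    #     blob_service.set_blob_metadata(
    #         'mycontainer', 'myblob',
    #         x_ms_meta_name_values={'category': 'report', 'owner': 'alice'})
    #     meta = blob_service.get_blob_metadata('mycontainer', 'myblob')
    #     # keys are returned with their prefix, e.g. 'x-ms-meta-category'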
def lease_blob(self, container_name, blob_name, x_ms_lease_action,
x_ms_lease_id=None, x_ms_lease_duration=60,
x_ms_lease_break_period=None, x_ms_proposed_lease_id=None):
'''
Establishes and manages a one-minute lock on a blob for write
operations.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_lease_action:
Required. Possible values: acquire|renew|release|break|change
x_ms_lease_id: Required if the blob has an active lease.
x_ms_lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. For backwards compatibility, the default is
60, and the value is only used on an acquire operation.
x_ms_lease_break_period:
Optional. For a break operation, this is the proposed duration of
seconds that the lease should continue before it is broken, between
0 and 60 seconds. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining
on the lease is used. A new lease will not be available before the
break period has expired, but the lease may be held for longer than
the break period. If this header does not appear with a break
operation, a fixed-duration lease breaks after the remaining lease
period elapses, and an infinite lease breaks immediately.
x_ms_proposed_lease_id:
Optional for acquire, required for change. Proposed lease ID, in a
GUID string format.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-lease-action', _str_or_none(x_ms_lease_action)),
('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\
if x_ms_lease_action == 'acquire' else None)),
('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),
('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-lease-id', 'x-ms-lease-time'])
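    # Illustrative usage sketch (editorial addition): acquire a short lease,
    # use the returned lease id for a write operation, then release it;
    # assumes `blob_service` is a configured instance of this service class.
    #
    #     lease = blob_service.lease_blob('mycontainer', 'myblob', 'acquire',
    #                                     x_ms_lease_duration=15)
    #     lease_id = lease['x-ms-lease-id']
    #     blob_service.set_blob_metadata(
    #         'mycontainer', 'myblob',
    #         x_ms_meta_name_values={'locked-by': 'worker-1'},
    #         x_ms_lease_id=lease_id)
    #     blob_service.lease_blob('mycontainer', 'myblob', 'release',
    #                             x_ms_lease_id=lease_id)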
def snapshot_blob(self, container_name, blob_name,
x_ms_meta_name_values=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, x_ms_lease_id=None):
'''
Creates a read-only snapshot of a blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
        if_modified_since: Optional. DateTime string.
        if_unmodified_since: Optional. DateTime string.
        if_match:
            Optional. Snapshot the blob only if its ETag value matches the
            value specified.
        if_none_match: Optional. An ETag value.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=snapshot'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-snapshot', 'etag', 'last-modified'])
def copy_blob(self, container_name, blob_name, x_ms_copy_source,
x_ms_meta_name_values=None,
x_ms_source_if_modified_since=None,
x_ms_source_if_unmodified_since=None,
x_ms_source_if_match=None, x_ms_source_if_none_match=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, x_ms_lease_id=None,
x_ms_source_lease_id=None):
'''
Copies a blob to a destination within the storage account.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_copy_source:
URL up to 2 KB in length that specifies a blob. A source blob in
the same account can be private, but a blob in another account
must be public or accept credentials included in this URL, such as
a Shared Access Signature. Examples:
https://myaccount.blob.core.windows.net/mycontainer/myblob
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
        x_ms_source_if_modified_since:
            Optional. A DateTime value. Specify this conditional header to
            copy the blob only if the source blob has been modified since
            the specified date/time.
        x_ms_source_if_unmodified_since:
            Optional. A DateTime value. Specify this conditional header to
            copy the blob only if the source blob has not been modified
            since the specified date/time.
        x_ms_source_if_match:
            Optional. An ETag value. Specify this conditional header to copy
            the source blob only if its ETag matches the value specified.
        x_ms_source_if_none_match:
            Optional. An ETag value. Specify this conditional header to copy
            the source blob only if its ETag does not match the value
            specified.
        if_modified_since: Optional. DateTime string.
        if_unmodified_since: Optional. DateTime string.
        if_match:
            Optional. Copy the blob only if its ETag value matches the
            value specified.
        if_none_match: Optional. An ETag value.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_source_lease_id:
Optional. Specify this to perform the Copy Blob operation only if
the lease ID given matches the active lease ID of the source blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_copy_source', x_ms_copy_source)
if x_ms_copy_source.startswith('/'):
# Backwards compatibility for earlier versions of the SDK where
# the copy source can be in the following formats:
# - Blob in named container:
# /accountName/containerName/blobName
# - Snapshot in named container:
# /accountName/containerName/blobName?snapshot=<DateTime>
# - Blob in root container:
# /accountName/blobName
# - Snapshot in root container:
# /accountName/blobName?snapshot=<DateTime>
account, _, source =\
x_ms_copy_source.partition('/')[2].partition('/')
x_ms_copy_source = self.protocol + '://' + \
account + self.host_base + '/' + source
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [
('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-source-if-modified-since',
_str_or_none(x_ms_source_if_modified_since)),
('x-ms-source-if-unmodified-since',
_str_or_none(x_ms_source_if_unmodified_since)),
('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),
('x-ms-source-if-none-match',
_str_or_none(x_ms_source_if_none_match)),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,
x_ms_lease_id=None):
'''
Aborts a pending copy_blob operation, and leaves a destination blob
with zero length and full metadata.
container_name: Name of destination container.
blob_name: Name of destination blob.
x_ms_copy_id:
Copy identifier provided in the x-ms-copy-id of the original
copy_blob operation.
x_ms_lease_id:
Required if the destination blob has an active infinite lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_copy_id', x_ms_copy_id)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + \
            _str(blob_name) + '?comp=copy&copyid=' + \
_str(x_ms_copy_id)
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-copy-action', 'abort'),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
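    # Illustrative usage sketch (editorial addition): start a server-side copy
    # and, if necessary, abort it using the copy id returned in the
    # 'x-ms-copy-id' header; assumes `blob_service` is a configured instance
    # of this service class.
    #
    #     source = 'https://myaccount.blob.core.windows.net/mycontainer/myblob'
    #     props = blob_service.copy_blob('othercontainer', 'myblob-copy', source)
    #     copy_id = props.get('x-ms-copy-id')
    #     if copy_id:
    #         blob_service.abort_copy_blob('othercontainer', 'myblob-copy',
    #                                      copy_id)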
def delete_blob(self, container_name, blob_name, snapshot=None,
x_ms_lease_id=None):
'''
Marks the specified blob or snapshot for deletion. The blob is later
deleted during garbage collection.
To mark a specific snapshot for deletion provide the date/time of the
snapshot via the snapshot parameter.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to delete.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def put_block(self, container_name, blob_name, block, blockid,
content_md5=None, x_ms_lease_id=None):
'''
Creates a new block to be committed as part of a blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
block: Content of the block.
blockid:
Required. A value that identifies the block. The string must be
less than or equal to 64 bytes in size.
content_md5:
Optional. An MD5 hash of the block content. This hash is used to
verify the integrity of the blob during transport. When this
header is specified, the storage service checks the hash that has
arrived with the one that was sent.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_not_none('blockid', blockid)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=block'
request.headers = [
('Content-MD5', _str_or_none(content_md5)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.query = [('blockid', _encode_base64(_str_or_none(blockid)))]
request.body = _get_request_body_bytes_only('block', block)
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def put_block_list(self, container_name, blob_name, block_list,
content_md5=None, x_ms_blob_cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None, x_ms_meta_name_values=None,
x_ms_lease_id=None):
'''
Writes a blob by specifying the list of block IDs that make up the
blob. In order to be written as part of a blob, a block must have been
successfully written to the server in a prior Put Block (REST API)
operation.
container_name: Name of existing container.
blob_name: Name of existing blob.
block_list: A str list containing the block ids.
content_md5:
Optional. An MD5 hash of the block content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent.
x_ms_blob_cache_control:
Optional. Sets the blob's cache control. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_type:
Optional. Sets the blob's content type. If specified, this property
is stored with the blob and returned with a read request.
x_ms_blob_content_encoding:
Optional. Sets the blob's content encoding. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_language:
Optional. Set the blob's content language. If specified, this
property is stored with the blob and returned with a read request.
x_ms_blob_content_md5:
Optional. An MD5 hash of the blob content. Note that this hash is
not validated, as the hashes for the individual blocks were
validated when each was uploaded.
x_ms_meta_name_values: Optional. Dict containing name and value pairs.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'
request.headers = [
('Content-MD5', _str_or_none(content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.body = _get_request_body(
_convert_block_list_to_xml(block_list))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
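    # Illustrative usage sketch (editorial addition): upload two blocks and
    # commit them as a single block blob. Block ids are passed as plain
    # strings; put_block base64-encodes them on the wire. Assumes
    # `blob_service` is a configured instance of this service class.
    #
    #     blob_service.put_block('mycontainer', 'myblob', b'hello ', 'block-000')
    #     blob_service.put_block('mycontainer', 'myblob', b'world', 'block-001')
    #     blob_service.put_block_list('mycontainer', 'myblob',
    #                                 ['block-000', 'block-001'])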
def get_block_list(self, container_name, blob_name, snapshot=None,
blocklisttype=None, x_ms_lease_id=None):
'''
Retrieves the list of blocks that have been uploaded as part of a
block blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. Datetime to determine the time to retrieve the blocks.
blocklisttype:
Specifies whether to return the list of committed blocks, the list
of uncommitted blocks, or both lists together. Valid values are:
committed, uncommitted, or all.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [
('snapshot', _str_or_none(snapshot)),
('blocklisttype', _str_or_none(blocklisttype))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _convert_response_to_block_list(response)
def put_page(self, container_name, blob_name, page, x_ms_range,
x_ms_page_write, timeout=None, content_md5=None,
x_ms_lease_id=None, x_ms_if_sequence_number_lte=None,
x_ms_if_sequence_number_lt=None,
x_ms_if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None):
'''
Writes a range of pages to a page blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
page: Content of the page.
x_ms_range:
Required. Specifies the range of bytes to be written as a page.
Both the start and end of the range must be specified. Must be in
format: bytes=startByte-endByte. Given that pages must be aligned
            with 512-byte boundaries, the start offset must be a multiple of
            512 and the end offset must be one less than a multiple of 512.
            Examples of valid byte ranges are 0-511, 512-1023, etc.
x_ms_page_write:
Required. You may specify one of the following options:
update (lower case):
Writes the bytes specified by the request body into the
specified range. The Range and Content-Length headers must
match to perform the update.
clear (lower case):
Clears the specified range and releases the space used in
storage for that range. To clear a range, set the
Content-Length header to zero, and the Range header to a
value that indicates the range to clear, up to maximum
blob size.
timeout: the timeout parameter is expressed in seconds.
content_md5:
Optional. An MD5 hash of the page content. This hash is used to
verify the integrity of the page during transport. When this header
is specified, the storage service compares the hash of the content
that has arrived with the header value that was sent. If the two
hashes do not match, the operation will fail with error code 400
(Bad Request).
x_ms_lease_id: Required if the blob has an active lease.
x_ms_if_sequence_number_lte:
Optional. If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
x_ms_if_sequence_number_lt:
Optional. If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
x_ms_if_sequence_number_eq:
Optional. If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
if_modified_since:
Optional. A DateTime value. Specify this conditional header to
write the page only if the blob has been modified since the
specified date/time. If the blob has not been modified, the Blob
service fails.
if_unmodified_since:
Optional. A DateTime value. Specify this conditional header to
write the page only if the blob has not been modified since the
specified date/time. If the blob has been modified, the Blob
service fails.
if_match:
Optional. An ETag value. Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
if_none_match:
Optional. An ETag value. Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('page', page)
_validate_not_none('x_ms_range', x_ms_range)
_validate_not_none('x_ms_page_write', x_ms_page_write)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=page'
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
('Content-MD5', _str_or_none(content_md5)),
('x-ms-page-write', _str_or_none(x_ms_page_write)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-if-sequence-number-le',
_str_or_none(x_ms_if_sequence_number_lte)),
('x-ms-if-sequence-number-lt',
_str_or_none(x_ms_if_sequence_number_lt)),
('x-ms-if-sequence-number-eq',
_str_or_none(x_ms_if_sequence_number_eq)),
('If-Modified-Since', _str_or_none(if_modified_since)),
('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
('If-Match', _str_or_none(if_match)),
('If-None-Match', _str_or_none(if_none_match))
]
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body_bytes_only('page', page)
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
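    # Illustrative usage sketch (editorial addition): write one 512-byte page
    # at the start of an existing page blob. The range must be 512-byte
    # aligned and, for the 'update' action, the body length must match the
    # range. Assumes `blob_service` is a configured instance of this class.
    #
    #     page = b'\x00' * 512
    #     blob_service.put_page('mycontainer', 'mypageblob', page,
    #                           x_ms_range='bytes=0-511',
    #                           x_ms_page_write='update')
    #     ranges = blob_service.get_page_ranges('mycontainer', 'mypageblob')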
def get_page_ranges(self, container_name, blob_name, snapshot=None,
range=None, x_ms_range=None, x_ms_lease_id=None):
'''
Retrieves the page ranges for a blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve information
from.
range:
Optional. Specifies the range of bytes over which to list ranges,
inclusively. If omitted, then all ranges for the blob are returned.
x_ms_range:
            Optional. Specifies the range of bytes over which to list page
            ranges. Both the start and end of the range must be specified.
            Must be in format: bytes=startByte-endByte. Given that pages must
            be aligned with 512-byte boundaries, the start offset must be a
            multiple of 512 and the end offset must be one less than a
            multiple of 512. Examples of valid byte ranges are 0-511,
            512-1023, etc.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=pagelist'
request.headers = [
('Range', _str_or_none(range)),
('x-ms-range', _str_or_none(x_ms_range)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.query = [('snapshot', _str_or_none(snapshot))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_simple_list(response, PageList, PageRange, "page_ranges")
| apache-2.0 |
kinnou02/navitia | source/jormungandr/jormungandr/interfaces/v1/serializer/api.py | 1 | 15557 | # coding: utf-8
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import datetime
import pytz
from jormungandr.interfaces.v1.serializer import (
pt,
schedule,
report,
base,
status,
geo_status,
graphical_isochron,
heat_map,
)
from jormungandr.interfaces.v1.serializer.base import (
NullableDictSerializer,
LambdaField,
PbNestedSerializer,
DescribedField,
AmountSerializer,
)
from jormungandr.interfaces.v1.serializer.fields import (
ErrorSerializer,
FeedPublisherSerializer,
PaginationSerializer,
LinkSchema,
NoteSerializer,
ExceptionSerializer,
)
from jormungandr.interfaces.v1.make_links import create_external_link
from jormungandr.interfaces.v1.serializer.journey import TicketSerializer, JourneySerializer
import serpy
from jormungandr.interfaces.v1.serializer.jsonschema.fields import Field, MethodField
from jormungandr.interfaces.v1.serializer.time import DateTimeDictField
from jormungandr.utils import (
get_current_datetime_str,
get_timezone_str,
NOT_A_DATE_TIME,
navitia_utcfromtimestamp,
)
from jormungandr.interfaces.v1.serializer.pt import AddressSerializer
from jormungandr.interfaces.v1.serializer import jsonschema
from jormungandr.interfaces.v1.serializer.status import CoverageErrorSerializer
class CO2Serializer(PbNestedSerializer):
co2_emission = AmountSerializer(attr='car_co2_emission', display_none=False)
class ContextSerializer(PbNestedSerializer):
def __init__(self, obj=None, is_utc=False, *args, **kwargs):
super(ContextSerializer, self).__init__(obj, *args, **kwargs)
self.is_utc = is_utc
car_direct_path = MethodField(schema_type=CO2Serializer(), display_none=False)
current_datetime = MethodField(
schema_type=str, display_none=False, description='The datetime of the request (considered as "now")'
)
timezone = MethodField(
schema_type=str,
display_none=False,
description='Timezone of any datetime in the response, ' 'default value Africa/Abidjan (UTC)',
)
def get_car_direct_path(self, obj):
from navitiacommon import response_pb2
if isinstance(obj, response_pb2.Response) and obj.HasField(str('car_co2_emission')):
return CO2Serializer(obj, display_none=False).data
return None
def get_current_datetime(self, _):
return get_current_datetime_str(is_utc=self.is_utc)
def get_timezone(self, _):
return 'Africa/Abidjan' if self.is_utc else get_timezone_str()
class PTReferentialSerializerNoContext(serpy.Serializer):
pagination = PaginationSerializer(attr='pagination', display_none=True)
error = ErrorSerializer(display_none=False)
feed_publishers = FeedPublisherSerializer(many=True, display_none=True)
disruptions = pt.DisruptionSerializer(attr='impacts', many=True, display_none=True)
notes = DescribedField(schema_type=NoteSerializer(many=True))
links = DescribedField(schema_type=LinkSchema(many=True))
class PTReferentialSerializer(PTReferentialSerializerNoContext):
# ContextSerializer can not be used directly because context does not exist in protobuf
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
class LinesSerializer(PTReferentialSerializer):
lines = pt.LineSerializer(many=True)
class DisruptionsSerializer(PTReferentialSerializer):
# we already have a disruptions fields by default
pass
class VehicleJourneysSerializer(PTReferentialSerializer):
vehicle_journeys = pt.VehicleJourneySerializer(many=True)
class TripsSerializer(PTReferentialSerializer):
trips = pt.TripSerializer(many=True)
class JourneyPatternsSerializer(PTReferentialSerializer):
journey_patterns = pt.JourneyPatternSerializer(many=True)
class JourneyPatternPointsSerializer(PTReferentialSerializer):
journey_pattern_points = pt.JourneyPatternPointSerializer(many=True)
class CommercialModesSerializer(PTReferentialSerializer):
commercial_modes = pt.CommercialModeSerializer(many=True)
class PhysicalModesSerializer(PTReferentialSerializer):
physical_modes = pt.PhysicalModeSerializer(many=True)
class StopPointsSerializer(PTReferentialSerializer):
stop_points = pt.StopPointSerializer(many=True)
class StopAreasSerializer(PTReferentialSerializer):
stop_areas = pt.StopAreaSerializer(many=True)
class RoutesSerializer(PTReferentialSerializer):
routes = pt.RouteSerializer(many=True)
class LineGroupsSerializer(PTReferentialSerializer):
line_groups = pt.LineGroupSerializer(many=True)
class NetworksSerializer(PTReferentialSerializer):
networks = pt.NetworkSerializer(many=True)
class ConnectionsSerializer(PTReferentialSerializer):
connections = pt.ConnectionSerializer(many=True)
class CompaniesSerializer(PTReferentialSerializer):
companies = pt.CompanieSerializer(many=True)
class PoiTypesSerializer(PTReferentialSerializer):
poi_types = pt.PoiTypeSerializer(many=True)
class PoisSerializer(PTReferentialSerializer):
pois = pt.PoiSerializer(many=True)
class ContributorsSerializer(PTReferentialSerializer):
contributors = pt.ContributorSerializer(many=True)
class DatasetsSerializer(PTReferentialSerializerNoContext):
datasets = pt.DatasetSerializer(many=True)
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, is_utc=True, display_none=False).data
class PlacesSerializer(serpy.Serializer):
error = ErrorSerializer(display_none=False)
feed_publishers = FeedPublisherSerializer(many=True, display_none=True)
disruptions = pt.DisruptionSerializer(attr='impacts', many=True, display_none=True)
places = pt.PlaceSerializer(many=True)
context = MethodField(schema_type=ContextSerializer(), display_none=False)
links = DescribedField(schema_type=LinkSchema(many=True))
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
class PtObjectsSerializer(serpy.Serializer):
error = ErrorSerializer(display_none=False)
feed_publishers = FeedPublisherSerializer(many=True, display_none=True)
disruptions = pt.DisruptionSerializer(attr='impacts', many=True, display_none=True)
pt_objects = pt.PtObjectSerializer(many=True, attr='places')
links = DescribedField(schema_type=LinkSchema(many=True))
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, False, display_none=False).data
class PlacesNearbySerializer(PTReferentialSerializer):
places_nearby = pt.PlaceNearbySerializer(many=True)
class CoverageDateTimeField(DateTimeDictField):
"""
custom date time field for coverage, uses the coverage's timezone to format the date
"""
def __init__(self, field_name=None, **kwargs):
super(CoverageDateTimeField, self).__init__(**kwargs)
self.field_name = field_name
def to_value(self, coverage):
tz_name = coverage.get('timezone')
field_value = coverage.get(self.field_name)
if not tz_name or field_value is None:
return None
dt = navitia_utcfromtimestamp(field_value)
if not dt:
return NOT_A_DATE_TIME
tz = pytz.timezone(tz_name)
if not tz:
return None
dt = pytz.utc.localize(dt)
dt = dt.astimezone(tz)
return dt.strftime("%Y%m%dT%H%M%S")
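    # Illustrative sketch (editorial addition): given a coverage dict carrying
    # a POSIX timestamp and its timezone, to_value() renders the timestamp in
    # that timezone using the compact navitia datetime format, e.g.
    #
    #     field = CoverageDateTimeField('last_load_at')
    #     field.to_value({'timezone': 'Europe/Paris',
    #                     'last_load_at': 1500000000})
    #     # -> a string such as '20170714T044000' (Paris local time)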
class CoverageSerializer(NullableDictSerializer):
id = Field(attr="region_id", schema_type=str, display_none=True, description='Identifier of the coverage')
start_production_date = Field(
schema_type=str,
description='Beginning of the production period. ' 'We only have data on this production period',
)
end_production_date = Field(
schema_type=str,
description='End of the production period. ' 'We only have data on this production period',
)
last_load_at = LambdaField(
method=lambda _, o: CoverageDateTimeField('last_load_at').to_value(o),
description='Datetime of the last data loading',
schema_type=str,
)
name = Field(schema_type=str, display_none=True, description='Name of the coverage')
status = Field(schema_type=str)
shape = Field(schema_type=str, display_none=True, description='GeoJSON of the shape of the coverage')
error = CoverageErrorSerializer(display_none=False)
dataset_created_at = Field(schema_type=str, description='Creation date of the dataset')
class CoveragesSerializer(serpy.DictSerializer):
regions = CoverageSerializer(many=True)
links = DescribedField(schema_type=LinkSchema(many=True))
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, is_utc=True, display_none=False).data
class JourneysCommon(PbNestedSerializer):
error = ErrorSerializer(display_none=False)
feed_publishers = FeedPublisherSerializer(many=True, display_none=True)
links = MethodField(schema_type=LinkSchema(many=True), display_none=True)
def get_links(self, obj):
# note: some request args can be there several times,
# but when there is only one elt, flask does not want lists
response = []
for value in obj.links:
args = {}
for e in value.kwargs:
if len(e.values) > 1:
args[e.key] = [v for v in e.values]
else:
args[e.key] = e.values[0]
args["_type"] = value.type
args["templated"] = value.is_templated
args["description"] = value.description
args["rel"] = value.rel
response.append(create_external_link('v1.{}'.format(value.ressource_name), **args))
return response
class JourneysSerializer(JourneysCommon):
journeys = JourneySerializer(many=True)
tickets = TicketSerializer(many=True, display_none=True)
disruptions = pt.DisruptionSerializer(attr='impacts', many=True, display_none=True)
context = MethodField(schema_type=ContextSerializer(), display_none=False)
notes = DescribedField(schema_type=NoteSerializer(many=True))
exceptions = DescribedField(schema_type=ExceptionSerializer(many=True))
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
class SchedulesSerializer(PTReferentialSerializer):
exceptions = DescribedField(schema_type=ExceptionSerializer(many=True))
class DeparturesSerializer(SchedulesSerializer):
departures = schedule.PassageSerializer(many=True, attr='next_departures', display_none=True)
class ArrivalsSerializer(SchedulesSerializer):
arrivals = schedule.PassageSerializer(many=True, attr='next_arrivals', display_none=True)
class StopSchedulesSerializer(SchedulesSerializer):
stop_schedules = schedule.StopScheduleSerializer(many=True, display_none=True)
class RouteSchedulesSerializer(SchedulesSerializer):
route_schedules = schedule.RouteScheduleSerializer(many=True, display_none=True)
class LineReportsSerializer(PTReferentialSerializer):
line_reports = report.LineReportSerializer(many=True, display_none=True)
warnings = base.BetaEndpointsSerializer()
class EquipmentReportsSerializer(PTReferentialSerializer):
equipment_reports = report.EquipmentReportSerializer(many=True, display_none=True)
warnings = base.BetaEndpointsSerializer()
class TrafficReportsSerializer(PTReferentialSerializer):
traffic_reports = report.TrafficReportSerializer(many=True, display_none=True)
class CalendarsSerializer(PTReferentialSerializer):
calendars = pt.CalendarSerializer(many=True, display_none=True)
class StatusSerializer(serpy.DictSerializer):
status = status.StatusSerializer()
context = MethodField(schema_type=ContextSerializer(), display_none=False)
warnings = base.BetaEndpointsSerializer()
def get_context(self, obj):
return ContextSerializer(obj, is_utc=True, display_none=False).data
class GeoStatusSerializer(serpy.DictSerializer):
geo_status = geo_status.GeoStatusSerializer()
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, is_utc=True, display_none=False).data
class GraphicalIsrochoneSerializer(JourneysCommon):
isochrones = graphical_isochron.GraphicalIsrochoneSerializer(attr='graphical_isochrones', many=True)
warnings = base.BetaEndpointsSerializer()
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
class HeatMapSerializer(JourneysCommon):
heat_maps = heat_map.HeatMapSerializer(many=True)
warnings = base.BetaEndpointsSerializer()
context = MethodField(schema_type=ContextSerializer(), display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
class DictAddressesSerializer(serpy.DictSerializer):
address = MethodField(schema_type=AddressSerializer(many=False, display_none=False))
context = MethodField(schema_type=ContextSerializer(), display_none=False)
regions = jsonschema.Field(schema_type=str, display_none=True, many=True)
message = MethodField(schema_type=str)
def get_context(self, obj):
return ContextSerializer(obj, display_none=False).data
def get_address(self, obj):
return obj.get('address', None)
def get_message(self, obj):
return obj.get('message')
class TechnicalStatusSerializer(NullableDictSerializer):
regions = status.CommonStatusSerializer(many=True, display_none=False)
jormungandr_version = Field(schema_type=str, display_none=True)
bss_providers = status.BssProviderSerializer(many=True, display_none=False)
context = MethodField(schema_type=ContextSerializer(), display_none=False)
warnings = base.BetaEndpointsSerializer()
redis = status.RedisStatusSerializer(display_none=False)
def get_context(self, obj):
return ContextSerializer(obj, is_utc=True, display_none=False).data
| agpl-3.0 |
poryfly/scikit-learn | sklearn/linear_model/coordinate_descent.py | 43 | 75144 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
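# Illustrative sketch (editorial addition): the grid starts at alpha_max, the
# smallest alpha for which all coefficients are exactly zero, and decreases
# geometrically down to alpha_max * eps. For example (values approximate):
#
#     import numpy as np
#     X = np.array([[1., 2.], [3., 4.], [5., 6.]])
#     y = np.array([1., 2., 3.])
#     alphas = _alpha_grid(X, y, n_alphas=5, eps=1e-3)
#     # alphas[0] is max_j |x_j^T y| / n_samples (after centering) and
#     # alphas[-1] is roughly alphas[0] * 1e-3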
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear interpolation
    can be used to retrieve model coefficients between the values output
    by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
    pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
            # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
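# Illustrative usage sketch (editorial addition), mirroring the lasso_path
# docstring example above but with a mixed L1/L2 penalty:
#
#     X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
#     y = np.array([1, 2, 3.1])
#     alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.8,
#                                          alphas=[5., 1., .5])
#     # coefs has shape (n_features, n_alphas), dual_gaps has shape (n_alphas,)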
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
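# Hedged usage sketch (not part of the library API): the docstring above notes
# that a penalty written as ``a * L1 + b * L2`` maps onto ElasticNet via
# ``alpha = a + b`` and ``l1_ratio = a / (a + b)``. The helper name and the toy
# data below are invented for illustration only.
def _elastic_net_from_a_b_sketch(a=0.7, b=0.3):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(50)
    enet = ElasticNet(alpha=a + b, l1_ratio=a / (a + b)).fit(X, y)
    return enet.coef_, enet.intercept_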
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
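# Hedged sketch (illustration only, synthetic data): the docstring above states
# that Lasso optimizes the same objective as ElasticNet with ``l1_ratio=1.0``;
# the comparison below is expected to return True. The helper name is invented.
def _lasso_vs_enet_l1_ratio_one_sketch(alpha=0.1):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.randn(30)
    lasso = Lasso(alpha=alpha).fit(X, y)
    enet = ElasticNet(alpha=alpha, l1_ratio=1.0).fit(X, y)
    return np.allclose(lasso.coef_, enet.coef_)  # expected True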
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
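# Hedged numpy-only sketch of the error computation at the end of
# `_path_residuals` (dense case): for every alpha on the path, the held-out
# residuals are squared and averaged over samples and targets, giving one MSE
# per alpha. Shapes follow the function above; the inputs are placeholders.
def _path_mse_sketch(X_test, y_test, coefs, intercepts):
    import numpy as np
    # X_test: (n_samples, n_features); y_test: (n_samples, n_targets)
    # coefs: (n_targets, n_features, n_alphas); intercepts: (n_targets, n_alphas)
    X_test_coefs = np.dot(X_test, coefs)            # (n_samples, n_targets, n_alphas)
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts                          # broadcast over samples
    return ((residues ** 2).mean(axis=0)).mean(axis=0)  # one MSE per alpha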
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
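# Hedged sketch of the model-selection step in `fit` above: MSE paths are
# averaged over folds, then the (l1_ratio, alpha) pair with the smallest mean
# MSE is kept. Array shapes mirror the code; the helper name is invented.
def _select_best_alpha_sketch(mse_paths, l1_ratios, alphas):
    import numpy as np
    # mse_paths: (n_l1_ratio, n_folds, n_alphas); alphas: (n_l1_ratio, n_alphas)
    mean_mse = np.mean(mse_paths, axis=1)
    best_mse = np.inf
    best_alpha = best_l1_ratio = None
    for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse):
        i_best_alpha = np.argmin(mse_alphas)
        if mse_alphas[i_best_alpha] < best_mse:
            best_mse = mse_alphas[i_best_alpha]
            best_alpha = l1_alphas[i_best_alpha]
            best_l1_ratio = l1_ratio
    return best_l1_ratio, best_alpha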
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
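# Hedged usage sketch for LassoCV (synthetic data, illustration only): the
# alpha grid is built automatically, `alpha_` holds the cross-validated choice
# and `mse_path_` has shape (n_alphas, n_folds) as documented above.
def _lasso_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] + 0.5 * X[:, 1] + 0.01 * rng.randn(60)
    model = LassoCV(n_alphas=20, cv=3).fit(X, y)
    return model.alpha_, model.mse_path_.shape  # shape expected to be (20, 3)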
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
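# Hedged usage sketch for ElasticNetCV (synthetic data, illustration only),
# following the docstring's advice to try several l1_ratio values biased
# towards 1; both the mixing parameter and alpha are then cross-validated.
def _elastic_net_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(80, 10)
    y = X[:, 0] - X[:, 2] + 0.05 * rng.randn(80)
    model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],
                         n_alphas=30, cv=3)
    model.fit(X, y)
    return model.l1_ratio_, model.alpha_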
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
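# Hedged numpy sketch of the mixed norm in the objective above: ||W||_21 is the
# sum over rows of W (one row per feature in the formula) of each row's
# euclidean norm. Note that the fitted ``coef_`` is stored as
# (n_tasks, n_features), i.e. the transpose of the W written in the docstring.
def _l21_norm_sketch(W):
    import numpy as np
    W = np.asarray(W)
    return np.sum(np.sqrt(np.sum(W ** 2, axis=1)))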
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
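# Hedged usage sketch (synthetic data, illustration only): with a 2-D target,
# the L1/L2 penalty selects features jointly, so each column of ``coef_``
# (one per feature) is either zeroed or kept for all tasks at once.
def _multi_task_lasso_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 6)
    Y = np.column_stack([X[:, 0], X[:, 0] + 0.1 * rng.randn(40)])
    model = MultiTaskLasso(alpha=0.1).fit(X, Y)
    return model.coef_.shape  # expected (n_tasks, n_features) == (2, 6)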
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
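# Hedged usage sketch for MultiTaskLassoCV (synthetic data, illustration only):
# a single alpha is selected by cross-validation jointly for all tasks, and the
# fitted attributes mirror MultiTaskLasso plus the CV diagnostics listed above.
def _multi_task_lasso_cv_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    Y = np.dot(X[:, :2], rng.randn(2, 3)) + 0.05 * rng.randn(50, 3)
    model = MultiTaskLassoCV(n_alphas=15, cv=3).fit(X, Y)
    return model.alpha_, model.mse_path_.shape  # expected (n_alphas, n_folds)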
| bsd-3-clause |
r-mart/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 179 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
antoinecarme/pyaf | tests/probabilistic_forecasting/test_ozone.py | 1 | 1441 | from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mModelSelection_Criterion = "CRPS"
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.columns);
lColumns = ['Split', 'Transformation', 'Model', 'Category', 'Complexity',
'FitCRPS', 'ForecastCRPS', 'TestCRPS']
print(lEngine.mSignalDecomposition.mTrPerfDetails[lColumns].head(10));
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause |
Avikalp7/image-aesthetics-learning | src/Learning/selected_feature.py | 1 | 15701 | """
Constructing the final feature vectors with selected features from the initial 56
along with RAG cut features.
"""
from __future__ import division
from scipy import misc
import numpy as np
from skimage import color
from skimage import data
import os
import PIL
from PIL import Image
from pywt import wavedec2
from sklearn.cluster import KMeans
from disjoint_sets import Graph
# from disjoint_sets import countIslands
global IH, IS, IV, path, image_sizes
global LH, HL, HH, S1, S2, S3
global _f10, _f11, _f12, _f13, _f14, _f15, _f16, _f17, _f18
# Parameter K for Kmeans is set here
kmeans_cluster_num = 12
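# For reference: with the feature_vec appends that are active in __main__ below,
# each saved vector holds, in order, f1, f6, f15, f17, f20, f21, f22, f23, f25,
# f28, f31, f43 and f54. Several of these (f15, f17, f20, f21) read globals that
# are only populated by f10-f18 / the prereq_* helpers, so the active selection
# in __main__ has to stay consistent with those dependencies.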
# Some images (b/w) give zero values on S1, S2, S3 - leading to division by zero
def check_zero(epsilon = 50):
global S1, S2, S3
if S1 == 0:
S1 = epsilon
if S2 == 0:
S2 = epsilon
if S3 == 0:
S3 = epsilon
# Prerequisite for features _f10, _f11, _f12: LL, LH, HL, HH of a 3-level 2-D Discrete Wavelet Transform of the Hue (IH) plane
def prereq_f10_f11_f12(i):
    global S1, S2, S3, LH, HL, HH
    # Three separate lists; a chained assignment (HL = LH = HH = [0]*3) would alias all three names to one object
    LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
coeffs = wavedec2(IH[i], 'db1', level = 3)
LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
# print('S1, S2, S3',S1, S2, S3)
check_zero()
# Prerequisite for features _f13, _f14, _f15: LL, LH, HL, HH of a 3-level 2-D Discrete Wavelet Transform of the Saturation (IS) plane
def prereq_f13_f14_f15(i):
    global S1, S2, S3, LH, HL, HH
    LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
    coeffs = wavedec2(IS[i], 'db1', level = 3)
    LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
check_zero()
# Prerequisite for features _f16, _f17, _f18: LL, LH, HL, HH of a 3-level 2-D Discrete Wavelet Transform of the Intensity (IV) plane
def prereq_f16_f17_f18(i):
    global S1, S2, S3, LH, HL, HH
    LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
    coeffs = wavedec2(IV[i], 'db1', level = 3)
    LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
check_zero()
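# Illustrative sketch (not called by the pipeline): the structure returned by
# pywt.wavedec2 at level=3, which the three prereq_* helpers above unpack.
# Coefficients come back coarsest-first, so index 2 of LH/HL/HH holds the
# coarsest (level-3) detail bands; the mapping of pywt's (cH, cV, cD) onto
# LH/HL/HH follows the convention already used above.
def _demo_wavedec2_structure():
    cA3, (cH3, cV3, cD3), (cH2, cV2, cD2), (cH1, cV1, cD1) = wavedec2(
        np.zeros((128, 128)), 'db1', level=3)
    # For a 128x128 input with 'db1': cA3 and the level-3 details are 16x16,
    # level-2 details are 32x32, level-1 details are 64x64.
    return cA3.shape, cH3.shape, cH2.shape, cH1.shape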
def segmentation(graph):
row = len(graph)
col = len(graph[0])
g = Graph(row, col, graph)
dic = {}
for cluster_num in range(kmeans_cluster_num):
# print ("Number of points in cluster number", cluster_num, "is: ")
dic[cluster_num] = g.countIslands(cluster_num)
# print('Len pathces = ', len(dic[cluster_num][1]), ' Len lis = ', len(dic[cluster_num][0]))
# print('i, BLOB_COUNT = ', i, blob_count)
# print('Ending K-Means')
return dic
def segments(dic):
all_lengths = []
all_patches = []
for key in dic:
all_lengths += dic[key][0]
all_patches += dic[key][1]
# print (len(all_lengths), len(all_patches))
all_lengths = np.array(all_lengths)
all_patches = np.array(all_patches)
max_5_indices = all_lengths.argsort()[-5:][::-1] # np.array
return all_patches[max_5_indices]
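# Minimal sketch (not called anywhere) of the segmentation front-end used in
# __main__ below: per-pixel LUV values of one 128x128 image are clustered with
# K-Means and the labels reshaped into a label image, which segmentation()
# splits into connected patches and segments() reduces to the 5 largest. The
# random array is only a stand-in for one slice of LUV_40p.npy.
def _demo_label_image():
    luv = np.random.rand(128 * 128, 3)
    km = KMeans(n_clusters=kmeans_cluster_num, random_state=0).fit(luv)
    return km.labels_.reshape((128, 128))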
# Exposure of Light
def f1(i):
    return sum(sum(IV[i]))/(IV[i].shape[0] * IV[i].shape[1])  # normalise by the pixel count of image i, not the whole stack
# Average Saturation / Saturation Indicator
def f3(i):
    return sum(sum(IS[i]))/(IS[i].shape[0] * IS[i].shape[1])  # normalise by the pixel count of image i
# Average Hue / Hue Indicator
def f4(i):
    return sum(sum(IH[i]))/(IH[i].shape[0] * IH[i].shape[1])  # normalise by the pixel count of image i
# Average hue in inner rectangle for rule of thirds inference
def f5(i):
X = IH[i].shape[0]
Y = IH[i].shape[1]
return sum(sum(IH[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * 9 / (X * Y)
# Average saturation in inner rectangle for rule of thirds inference
def f6(i):
X = IS[i].shape[0]
Y = IS[i].shape[1]
return sum(sum(IS[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * (9/(X * Y))
# Average V in inner rectangle for rule of thirds inference
def f7(i):
X = IV[i].shape[0]
Y = IV[i].shape[1]
return sum(sum(IV[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * (9/(X * Y))
# Spacial Smoothness of first level of Hue property
def f10(i):
global _f10
prereq_f10_f11_f12(i)
_f10 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f10
# Spacial Smoothness of second level of Hue property
def f11(i):
global _f11
_f11 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f11
# Spacial Smoothness of third level of Hue property
def f12(i):
global _f12
_f12 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f12
# Spacial Smoothness of first level of Saturation property
def f13(i):
global _f13
prereq_f13_f14_f15(i)
_f13 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f13
# Spacial Smoothness of second level of Saturation property
def f14(i):
global _f14
_f14 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f14
# Spacial Smoothness of third level of Saturation property
def f15(i):
global _f15
_f15 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f15
# Spacial Smoothness of first level of Intensity property
def f16(i):
global _f16
prereq_f16_f17_f18(i)
_f16 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f16
# Spacial Smoothness of second level of Intensity property
def f17(i):
global _f17
_f17 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f17
# Spacial Smoothness of third level of Intensity property
def f18(i):
global _f18
_f18 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f18
# Sum of the average wavelet coefficients over all three frequency levels of Hue property
def f19(i):
return _f10 + _f11 + _f12
# Sum of the average wavelet coefficients over all three frequency levels of Saturation property
def f20(i):
return _f13 + _f14 + _f15
# Sum of the average wavelet coefficients over all three frequency levels of Intensity property
def f21(i):
return _f16 + _f17 + _f18
# Image Size feature
def f22(i):
return image_sizes[i][0] + image_sizes[i][1]
# Aspect Ratio Feature
def f23(i):
return image_sizes[i][0] / float(image_sizes[i][1])
# Number of patches > XY/100 pixels, how many disconnected significantly large regions are present
def f24(i, s):
count = 0
for si in s:
        if len(si) >= 164:  # XY/100 = 128*128/100 ~= 164 pixels
count += 1
return count
# Number of different color blobs / color complexity of image
def f25(i, dic):
count = 0
for key in dic:
max_length = max(dic[key][0])
if max_length > 1000:
count += 1
return count
# Average Hue value for patch 1
def f26(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 2
def f27(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 3
def f28(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 4
def f29(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 5
def f30(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 1
def f31(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 2
def f32(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 3
def f33(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 4
def f34(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 5
def f35(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 1
def f36(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 2
def f37(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 3
def f38(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 4
def f39(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 5
def f40(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Relative size of the largest patch as a fraction of the 128*128 = 16384 pixels;
# f41-f45 read the module-level list `s` of the 5 largest patches built in __main__
def f41(i):
si = s[0]
return len(si)/16384
def f42(i):
si = s[1]
return len(si)/16384
def f43(i):
si = s[2]
return len(si)/16384
def f44(i):
si = s[3]
return len(si)/16384
def f45(i):
si = s[4]
return len(si)/16384
# Sum of pairwise hue differences between the 5 largest patches
def f46(i, h):
sumh = 0
for j in range(5):
for k in range(5):
sumh += abs(h[j] - h[k])
return sumh
# Sum of pairwise hue differences with wrap-around: hues lie in [0, 1], so the
# circular distance in degrees is 360*d if d < 0.5, else 360 - 360*d, with d = |h[j] - h[k]|
def f47(i, h):
sumh = 0
for j in range(5):
for k in range(5):
t = abs(h[j] - h[k])
if t < 0.5:
sumh += 360*t
else:
sumh += 360 - 360*t
return sumh
def f48_pre(i, s):
centers = []
for si in s:
point_sum_x = 0
point_sum_y = 0
for point in si:
x, y = point
point_sum_x += x
point_sum_y += y
x = point_sum_x/len(si)
y = point_sum_y/len(si)
centers.append([x,y])
return centers
# Rule-of-thirds cell of the n-th largest patch: rows are encoded as 10/20/30 and
# columns as 1/2/3 (thresholds 43 ~ 128/3 and 86 ~ 2*128/3), so f48-f52 return a
# code in {11, ..., 33}
def f48(i, s):
centers = f48_pre(i, s)
n = 0
c = centers[n]
if c[0] < 43:
r = 10
    elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f49(i, s):
centers = f48_pre(i, s)
n = 1
c = centers[n]
if c[0] < 43:
r = 10
    elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f50(i, s):
centers = f48_pre(i, s)
n = 2
c = centers[n]
if c[0] < 43:
r = 10
    elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f51(i, s):
centers = f48_pre(i, s)
n = 3
c = centers[n]
if c[0] < 43:
r = 10
    elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f52(i, s):
centers = f48_pre(i, s)
n = 4
c = centers[n]
if c[0] < 43:
r = 10
    elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
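# Worked example of the f48-f52 cell encoding (assuming the row test uses c[0],
# the centroid row, and the column test uses c[1], the centroid column): a
# centroid at (row=20, col=100) on the 128x128 grid lands in the top row (r=10)
# and the right column (cc=3), giving cell code 13.
def _demo_grid_code():
    c = (20, 100)
    r = 10 if c[0] < 43 else (20 if c[0] < 86 else 30)
    cc = 1 if c[1] < 43 else (2 if c[1] < 86 else 3)
    assert r + cc == 13
    return r + cc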
# DoF (depth-of-field) feature for the Hue property: fraction of the level-3 wavelet
# energy lying in the central 8x8 block of the 16x16 level-3 coefficient maps
def f53(i):
prereq_f10_f11_f12(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
# DoF feature for Saturation property
def f54(i):
prereq_f13_f14_f15(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
# DoF feature for Intensity property
def f55(i):
prereq_f16_f17_f18(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
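# Tiny numeric check (not used by the pipeline) of the ratio computed by f53-f55:
# for a 128x128 image the level-3 coefficient maps are 16x16, and a uniform map
# puts 64/256 = 0.25 of its energy in the central [4:12, 4:12] block.
def _demo_dof_ratio():
    level3 = np.ones((16, 16))
    ratio = np.abs(level3[4:12, 4:12]).sum() / np.abs(level3).sum()
    assert abs(ratio - 0.25) < 1e-12
    return ratio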
path = "/home/avikalp/semester6/SIGIR/photonet_dataset/images/"
if __name__ == '__main__':
# graph = [[1, 1, 0, 0, 0],
# [0, 1, 0, 0, 2],
# [1, 0, 0, 2, 2],
# [0, 0, 0, 0, 0],
# [1, 0, 1, 0, 1]]
# row = len(graph)
# col = len(graph[0])
# g= Graph(row, col, graph)
# k = 0
# print ("Number of islands is :",)
# print(g.countIslands(k))
# exit()
subset_indices = list(np.load('good_indices.npy'))
image_sizes = list(np.load('image_sizes_40p.npy'))
print('Loading IHSV...')
IH = np.load('IH_40p.npy')
IS = np.load('IS_40p.npy')
IV = np.load('IV_40p.npy')
print('IV','IHSV loaded.')
print('Loading LUV...')
LUV = np.load('LUV_40p.npy')
print('LUV loaded.')
feature_vec = []
for i, index in enumerate(subset_indices):
print (i)
feature_vec.append([])
feature_vec[i].append(f1(i))
# feature_vec[i].append(f2(i))
# feature_vec[i].append(f3(i))
# feature_vec[i].append(f4(i))
# feature_vec[i].append(f5(i))
feature_vec[i].append(f6(i))
# feature_vec[i].append(f7(i))
# feature_vec[i].append(f8(i))
# feature_vec[i].append(f9(i))
# feature_vec[i].append(f10(i))
# feature_vec[i].append(f11(i))
# feature_vec[i].append(f12(i))
# feature_vec[i].append(f13(i))
# feature_vec[i].append(f14(i))
feature_vec[i].append(f15(i))
# feature_vec[i].append(f16(i))
feature_vec[i].append(f17(i))
# feature_vec[i].append(f18(i))
# feature_vec[i].append(f19(i))
feature_vec[i].append(f20(i))
feature_vec[i].append(f21(i))
feature_vec[i].append(f22(i))
feature_vec[i].append(f23(i))
# print('Starting K-Means')
# kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
# kmeans.labels_
# kmeans.predict([[0, 0], [4, 4]])
_LUV = LUV[i].reshape((16384, 3))
kmeans = KMeans(n_clusters=kmeans_cluster_num, random_state=0).fit(_LUV)
# centers = kmeans.cluster_centers_
graph = kmeans.labels_
graph = graph.reshape((128,128))
dic = segmentation(graph)
s = list(segments(dic))
H = []
for k in range(5):
sumh = 0
for i1, j1 in s[k]:
sumh += IH[i][i1][j1]
H.append(sumh)
# feature_vec[i].append(f24(i, s))
feature_vec[i].append(f25(i, dic))
# feature_vec[i].append(f26(i, s))
# feature_vec[i].append(f27(i, s))
feature_vec[i].append(f28(i, s))
# feature_vec[i].append(f29(i, s))
# feature_vec[i].append(f30(i, s))
feature_vec[i].append(f31(i, s))
# feature_vec[i].append(f32(i, s))
# feature_vec[i].append(f33(i, s))
# feature_vec[i].append(f34(i, s))
# feature_vec[i].append(f35(i, s))
# feature_vec[i].append(f36(i, s))
# feature_vec[i].append(f37(i, s))
# feature_vec[i].append(f38(i, s))
# feature_vec[i].append(f39(i, s))
# feature_vec[i].append(f40(i, s))
# feature_vec[i].append(f41(i))
# feature_vec[i].append(f42(i))
feature_vec[i].append(f43(i))
# feature_vec[i].append(f44(i))
# feature_vec[i].append(f45(i))
# feature_vec[i].append(f46(i, H))
# feature_vec[i].append(f47(i, H))
# feature_vec[i].append(f48(i, s))
# feature_vec[i].append(f49(i, s))
# feature_vec[i].append(f50(i, s))
# feature_vec[i].append(f51(i, s))
# feature_vec[i].append(f52(i, s))
# feature_vec[i].append(f53(i))
feature_vec[i].append(f54(i))
# feature_vec[i].append(f55(i))
# feature_vec[i].append(f56(i))
# -------------------------- #
# Do something
#
#
# del feature_vec[i][:]
np.save('../../data/selected_feature_vecs.npy', feature_vec) | mit |
nouiz/pylearn2 | pylearn2/training_algorithms/default.py | 44 | 6791 | """
A generic training algorithm that implements no real training code of its
own but just calls the model.train_batch method on minibatches of data.
"""
import functools
from theano.compat.six.moves import xrange
from pylearn2.monitor import Monitor
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import DataSpecsMapping
class DefaultTrainingAlgorithm(TrainingAlgorithm):
"""
A generic training algorithm that implements no real training code of its
own but just calls the model.train_batch method on minibatches of data.
Parameters
----------
batch_size : int, optional
If batch_size is None, reverts to the `force_batch_size` field of
the model
batches_per_iter : int, optional
WRITEME
monitoring_batch_size : int, optional
Size of monitoring batches.
monitoring_batches : int, optional
WRITEME
monitoring_dataset : Dataset or dict, optional
A Dataset or a dictionary mapping string dataset names to Datasets
termination_criterion : WRITEME
If specified, can cause the algorithm to terminate before
`model.learn_batch` says to
set_batch_size : bool, optional
If True, if `model` has a batch size but is not forced to use that
one, the training algorithm will set the model to use `batch_size`
instead.
"""
def __init__(self, batch_size=None, batches_per_iter=1000,
monitoring_batch_size=None, monitoring_batches=-1,
monitoring_dataset=None, termination_criterion=None,
set_batch_size=False):
self.__dict__.update(locals())
del self.self
if monitoring_dataset is None:
assert monitoring_batches == -1
assert monitoring_batch_size is None
self._set_monitoring_dataset(monitoring_dataset)
self.monitoring_batches = monitoring_batches
self.bSetup = False
self.termination_criterion = termination_criterion
def setup(self, model, dataset):
"""
Allows the training algorithm to do some preliminary configuration
*before* we actually start training the model. The dataset is provided
in case other derived training algorithms need to modify model based on
the dataset.
Parameters
----------
model : object
Python object representing the model to train loosely
implementing the interface of models.model.Model.
dataset : pylearn2.datasets.dataset.Dataset
Dataset object used to draw training data
"""
self._synchronize_batch_size(model)
self.model = model
self.monitor = Monitor.get_monitor(model)
if self.monitoring_dataset is not None:
# Get the data specifications needed by the model
space, source = model.get_monitoring_data_specs()
# Create Theano variables for each of the individual components
# of that data. Usually, it will be X for inputs and Y for targets.
# First, we need to find these components, and put them in a tuple
mapping = DataSpecsMapping((space, source))
space_tuple = mapping.flatten(space, return_tuple=True)
source_tuple = mapping.flatten(source, return_tuple=True)
# Then, build a flat tuple of these Theano variables
ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
for (sp, src) in safe_zip(space_tuple, source_tuple))
# Finally, organize them back into a structure expected by the
# monitoring channels of the model
nested_ipt = mapping.nest(ipt)
channels = model.get_monitoring_channels(nested_ipt)
if not isinstance(channels, dict):
raise TypeError("model.get_monitoring_channels must return a "
"dictionary, but it returned " + str(channels))
for dataset_name in self.monitoring_dataset:
if dataset_name == '':
prefix = ''
else:
prefix = dataset_name + '_'
monitoring_dataset = self.monitoring_dataset[dataset_name]
if (self.monitoring_batch_size is None and
self.monitoring_batches == -1):
self.monitoring_batch_size = self.batch_size
self.monitoring_batches = self.batches_per_iter
self.monitor.add_dataset(dataset=monitoring_dataset,
mode="sequential",
batch_size=self.monitoring_batch_size,
num_batches=self.monitoring_batches)
for name in channels:
J = channels[name]
if isinstance(J, tuple):
assert len(J) == 2
J, prereqs = J
else:
prereqs = None
self.monitor.add_channel(name=prefix + name,
ipt=nested_ipt,
val=J,
dataset=monitoring_dataset,
prereqs=prereqs,
data_specs=(space, source))
self.first = True
self.bSetup = True
@functools.wraps(TrainingAlgorithm.train)
def train(self, dataset):
assert self.bSetup
model = self.model
batch_size = self.batch_size
for i in xrange(self.batches_per_iter):
# model.train_batch and self.train both return False when training
# should terminate.
learn_more = model.train_batch(dataset, batch_size)
model.monitor.report_batch(batch_size)
if not learn_more:
break
# Make sure we didn't exit training loop because Model.learn
# hasn't been updated to new interface yet.
if learn_more not in [True, False]:
msg = ('The learn method of model %s did not return a boolean ' +
'value. Please update your model accordingly.')
raise ValueError(msg % str(model))
self.learn_more = learn_more
def continue_learning(self, model):
"""
.. todo::
WRITEME
"""
if self.learn_more:
if self.termination_criterion is not None:
return self.termination_criterion.continue_learning(model)
return True
return False
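# Minimal usage sketch (hypothetical `model` and `train_set` objects; this only
# mirrors roughly what pylearn2's Train loop does with this class, it is not an
# exact transcript of that loop):
#
#   algorithm = DefaultTrainingAlgorithm(batch_size=100,
#                                        batches_per_iter=10,
#                                        monitoring_dataset={'train': train_set})
#   algorithm.setup(model, train_set)
#   keep_going = True
#   while keep_going:
#       algorithm.train(train_set)
#       keep_going = algorithm.continue_learning(model)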
| bsd-3-clause |
CforED/Machine-Learning | sklearn/tests/test_cross_validation.py | 19 | 46586 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to zero/one
    # score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
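    # bad_cv below yields test folds that do not form a partition of the
    # samples, so cross_val_predict is expected to reject it with ValueError.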
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
inspirehep/invenio | modules/websearch/lib/search_engine.py | 2 | 334473 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009,
## 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2018, 2019, 2020 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_BIBFORMAT_HIDDEN_RECJSON_FIELDS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_WEBSEARCH_BLACKLISTED_FORMATS, \
CFG_WEBSEARCH_MAX_RECORDS_REFERSTO, \
CFG_WEBSEARCH_MAX_RECORDS_CITEDBY
from invenio.search_engine_config import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError, \
InvenioWebSearchReferstoLimitError, \
InvenioWebSearchCitedbyLimitError, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH
from invenio.search_engine_utils import (get_fieldvalues,
get_fieldvalues_alephseq_like,
record_exists)
from invenio.bibrecord import create_record, record_xml_output
from invenio.bibrank_record_sorter import (get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.bibindex_tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.bibindex_engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.bibindex_engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.bibindex_engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.bibindex_engine_utils import get_idx_indexer, is_index_using_unicode_520
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents, translate_to_ascii
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
from invenio.urlutils import redirect_to_url
from invenio import bibrecord
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.bibrank_selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.dbquery import run_sql, \
run_sql_with_limit, \
wash_table_column_name, \
get_table_update_time
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils_bibindex_searcher import solr_get_bitset
from invenio.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.websearch_services import \
get_search_services, \
CFG_WEBSEARCH_SERVICE_MAX_SERVICE_ANSWER_RELEVANCE, \
CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY, \
CFG_WEBSEARCH_SERVICE_MIN_RELEVANCE_TO_DISPLAY, \
CFG_WEBSEARCH_SERVICE_MAX_RELEVANCE_DIFFERENCE
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.bibauthorid_config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
websearch_templates = invenio.template.load('websearch')
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt",
"search_services": "SER"};
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except NameError:
restricted_collection_cache = RestrictedCollectionDataCacher()
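# Illustrative sketch (not part of the original module): how the DataCacher
# instance above is meant to be consumed -- through the helper function, not
# the cache object itself. The collection name is a hypothetical example and
# the call requires a configured Invenio database.
def _sketch_restricted_collection_check(collection_name="Theses"):
    """Return True if the given collection is restricted; mirrors the
    intended use of collection_restricted_p()."""
    # With recreate_cache_if_needed=True (the default) the access control
    # tables are re-read only when their update timestamp has changed.
    return collection_restricted_p(collection_name,
                                   recreate_cache_if_needed=True)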
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
for collection in restricted_collection_cache.cache:
if acc_authorize_action(user_info, 'viewrestrcoll', collection=collection)[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
    Check if the user is the owner of the record, i.e. they are the submitter
    and/or belong to an owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
###FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
    Check if the user is allowed to view the record based on the MARC tags
    inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS,
    i.e. their email is inside the 506__m tag or they belong to an e-group
    listed in the 506__m tag.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
    @return: True if the user is 'allowed to view' the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid, ln=CFG_SITE_LANG):
"""
    Check if the user is authorized to view the given recid. The function
    grants access in two cases: either the user has author rights on this
    record, or they have view rights to the primary collection this record
    belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
_ = gettext_set_language(ln)
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's see first if the user owns the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (2, _("""The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
            until the assignment has been fully completed. Please come back later to
properly access this record."""))
else:
        ## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
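# Illustrative sketch (not part of the original module): typical use of
# check_user_can_view_record() from request-handling code. 'req' and the
# record ID are hypothetical and a configured Invenio instance is assumed.
def _sketch_record_access_check(req, recid=1):
    """Return True if the record may be shown to the current user, or the
    authorization message explaining why not."""
    user_info = collect_user_info(req)
    (auth_code, auth_msg) = check_user_can_view_record(user_info, recid)
    if auth_code == 0:
        return True           # access granted
    return auth_msg           # access denied; propagate the explanation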
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
res = run_sql("SELECT name FROM collection")
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
try:
reclist = intbitset(res[0][1])
except (IndexError, TypeError):
pass
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
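# Illustrative sketch (not part of the original module): collection reclists
# are intbitsets, so narrowing an arbitrary hitset to a single collection is
# a plain set intersection. The collection name is a hypothetical example and
# a populated Invenio database is assumed.
def _sketch_restrict_hitset_to_collection(hitset, collection="Articles"):
    """Return only those recIDs from 'hitset' that belong to 'collection'."""
    return hitset & get_collection_reclist(collection)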
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({'value': code,
'text': name
})
else:
formats.append({'value': 'hb',
'text': "HTML brief"
})
return formats
class SearchResultsCache(DataCacher):
"""
Provides temporary lazy cache for Search Results.
Useful when users click on `next page'.
"""
def __init__(self):
def cache_filler():
return {}
def timestamp_verifier():
return '1970-01-01 00:00:00' # lazy cache is always okay;
# its filling is governed by
# CFG_WEBSEARCH_SEARCH_CACHE_SIZE
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not search_results_cache.is_ok_p:
raise Exception
except Exception:
search_results_cache = SearchResultsCache()
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if c not in ret:
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
    though collection names do not change that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
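# Illustrative sketch (not part of the original module): when translating many
# collection names in one go, refresh the cache once and then pass
# verify_cache_timestamp=False inside the loop, as the docstring above
# recommends. The argument values are hypothetical.
def _sketch_translate_collection_names(collections, ln=CFG_SITE_LANG):
    """Return the I18N names of 'collections' with a single cache check."""
    collection_i18nname_cache.recreate_cache_if_needed()
    return [get_coll_i18nname(c, ln, verify_cache_timestamp=False)
            for c in collections]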
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if f not in ret:
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score DESC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in pattern.split():
if word not in words:
words[word] = 1
return words.keys()
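# Illustrative sketch (not part of the original module): expected behaviour of
# get_words_from_pattern() on a small pattern, assuming the default
# CFG_BIBINDEX_CHARS_PUNCTUATION. Punctuation followed by a space is stripped
# and duplicate words are collapsed; the order of the returned list is not
# guaranteed, since it comes from dict keys.
def _sketch_get_words_from_pattern():
    words = get_words_from_pattern("ellis, muon muon decay")
    # roughly ['ellis', 'muon', 'decay'], in some order
    return sorted(words)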
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'e'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and p.find(',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if pi.find(":") > 0:
fi, pi = pi.split(":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip eventual spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = pi.replace('"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = pi.replace("'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
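# Illustrative sketch (not part of the original module): the rough shape of the
# (operator, pattern, field, type) units produced by create_basic_search_units()
# for a mixed query, assuming 'title' is a known logical field code on the
# instance and no match type 'm' is given.
def _sketch_create_basic_search_units():
    units = create_basic_search_units(None, 'muon* title:"nuclear physics"', '')
    # roughly: [['+', 'muon*', '', 'w'],                # word search, any field
    #           ['+', 'nuclear physics', 'title', 'a']] # phrase (ACC) search
    return units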
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
if 'Origin' in req.headers_in:
req.headers_out["Access-Control-Allow-Origin"] = '*'
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "recjson":
req.content_type = "application/json"
if 'Origin' in req.headers_in:
req.headers_out["Access-Control-Allow-Origin"] = '*'
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## eventual better place to this code)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
record_exists(recID) == 1:
metaheaderadd += format_record(recID,
CFG_WEBSEARCH_DETAILED_META_FORMAT,
ln=ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
# we know the collection, lets allow page styles based on cc
#collection names may not satisfy rules for css classes which
#are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
#however it isn't clear what we should do about cases with
#numbers, so we leave them to fail. Everything else becomes "_"
css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({'value': cx,
'text': cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({'value': CFG_SITE_NAME,
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({'value': '',
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({'value': val['value'],
'text': val['text'],
'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{'value': '',
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{'value': CFG_SITE_NAME,
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value': code,
'text': name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if 'author' in search_fields or 'firstauthor' in search_fields:
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
fe, f1e, f2e, f3e = [add_exact(field) for field in search_fields]
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value': '',
'text': _("latest first")
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
Return list of cc, colls_to_display, colls_to_search since the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
    * if it is equal to 1, then we can wash the colls list down
      and search solely in the collection the user started from;
    * if it is equal to 0, then we are splitting to the first level
of collections, i.e. collections as they appear on the page
we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if ci in collection_reclist_cache.cache:
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if cc not in collection_reclist_cache.cache:
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if coll in collection_reclist_cache.cache:
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
# the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
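# Rough usage sketch for wash_colls() (comments only; the collection names are
# hypothetical and depend on the site's collection tree):
#
#     cc, to_display, to_search, hosted, dbg = wash_colls(
#         cc="Articles & Preprints", c=["Articles"], split_colls=0, verbose=1)
#
# If the user deselected nothing, to_display collapses to [cc]; otherwise the
# individually kept collections are displayed and searched one by one, with
# hosted collections split off into the 'hosted' list.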
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be 'exact' (default),
'leading_to_comma', or 'leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match a different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search is doing LIKE query, so would match everything:
term_for_lookup = term_for_lookup.replace('%', '\\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
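# Illustrative example of the match types above (comments only; the knowledge
# base name 'INDEX-SYNONYM-TITLE' is an assumption, not necessarily configured):
#
#     get_synonym_terms('Phys. Rev. D, 87', 'INDEX-SYNONYM-TITLE',
#                       CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma'])
#
# would look up only 'Phys. Rev. D' in the KB and re-append ', 87' to every
# synonym found, while 'exact' would look up the full string as-is.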
def wash_output_format(ouput_format, verbose=False, req=None):
"""Wash output format FORMAT. Currently only prevents input like
'of=9' for backwards-compatible format that prints certain fields
only. (for this task, 'of=tm' is preferred)"""
if str(ouput_format[0:3]).isdigit() and len(ouput_format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to HTML brief default
return 'hb'
elif ouput_format in CFG_WEBSEARCH_BLACKLISTED_FORMATS:
if verbose:
write_warning("Selected format is not available through perform_request_search", req=req)
# Returning an empty list seems dangerous because you wouldn't know
# right away that the list is not supposed to be empty.
return 'hb'
else:
return ouput_format
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
(1-3 letters). TODO: instead of this approximative treatment, it
will be much better to introduce a temporal limit, e.g. to kill a
query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = p.strip()
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
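# Rough illustration of the washing above (comments only; exact output depends
# on the regexp constants defined elsewhere in this module):
#
#     wash_pattern('ell* "higgs boson"')
#
# keeps the quoted phrase intact (spaces protected via __SPACE__), keeps 'ell*'
# since the wildcard follows a letter, but would drop a bare '*' appearing
# right after a space, and finally strips surrounding whitespace.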
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
(D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
and return (YYYY1-MM1-DD1 HH1:MM1:SS1, YYYY2-MM2-DD2 HH2:MM2:SS2) datetime
strings in the YYYY-MM-DD HH:MM:SS format suitable for time
restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
# given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
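# Worked example of the completion rules above (comments only):
#
#     wash_dates(d1y=2004)
#     # -> ("2004-01-01 00:00:00", "9999-12-31 00:00:00")
#
#     wash_dates(d1="2004-07-01 10:00:00", d2y=2005, d2m=3)
#     # -> ("2004-07-01 10:00:00", "2005-03-31 00:00:00")
#
# i.e. missing start-date parts default to the earliest value, missing
# end-date parts to the latest one, and a full D1/D2 string wins over tuples.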
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
if not res or not res[0][0]:
return False
try:
return res[0][0].startswith("hostedcollection:")
except IndexError:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
if res:
return res[0][0]
else:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
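# Illustrative call (comments only; 'Preprints' and 'Articles & Preprints' are
# hypothetical collection names used here just to show the ordering):
#
#     get_coll_ancestors('Preprints')
#     # -> e.g. ['Articles & Preprints'], with the top-most ancestor first,
#     #    since the list collected bottom-up is reversed before returning.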
def get_coll_sons(coll, coll_type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'coll_type' for collection 'coll'.
If coll_type = '*', both regular and virtual collections will be returned.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
if coll_type == '*':
coll_type_query = " IN ('r', 'v')"
query_params = (coll, )
else:
coll_type_query = "=%s"
query_params = (coll_type, coll)
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
query += " ORDER BY cc.score DESC"
res = run_sql(query, query_params)
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
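# Illustrative calls (comments only; CFG_SITE_NAME stands in for the site's
# home collection and the result depends on the local collection tree):
#
#     get_coll_sons(CFG_SITE_NAME)             # regular, public sons only
#     get_coll_sons(CFG_SITE_NAME, '*', 0)     # regular + virtual, incl. restricted
#
# Sons are returned in the order given by collection_collection.score DESC.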
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
"""Return a list of all children of type 'coll_type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If coll_type='*', then return both regular and virtual collections.
d_internal_coll_sons is an internal dictionary used in recursion for
minimizing the number of database calls and should not be used outside
this scope.
"""
if not d_internal_coll_sons:
d_internal_coll_sons = {}
children = []
if coll not in d_internal_coll_sons:
d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
for child in d_internal_coll_sons[coll]:
children.append(child)
children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
return children, d_internal_coll_sons
ret = {}
d_internal_coll_sons = None
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score DESC""",
(coll, coll_type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Browse either biliographic phrases or words indexes, and display it."""
# load the right message language
_ = gettext_set_language(ln)
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and p.find(":") > 0: # does 'p' contain ':'?
f, p = p.split(":", 1)
## do we search in words indexes?
if not f:
return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
register_exception(req=req, alert_admin=True)
# probably there are no hits at all:
req.write(_("No values found."))
return
## try to check hits in these particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
The 'ap' argument governs whether alternative patterns are to
be used in case there is no direct hit for (p,f,m). For
example, whether to replace non-alphanumeric characters by
spaces if it would give some hits. See the Search Internals
document for detailed description. (ap=0 forbids the
alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9.
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
#prepare hiddenfield-related..
myhiddens = CFG_BIBFORMAT_HIDDEN_TAGS
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %s. Searching for %s in all fields." % (cgi.escape(repr(bsu_f)), cgi.escape(repr(bsu_p)))), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %s.' % str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
except InvenioWebSearchReferstoLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term after reference operator too generic, displaying only partial results..."), req=req)
except InvenioWebSearchCitedbyLimitError, excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term after citedby operator too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o in ("|", "-") or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
elif bsu_f == 'journal' and len(bsu_p.split(',')) == 3 and '-' in bsu_p.split(',')[-1]:
jrn, vol, page = bsu_p.split(',')
bsu_pn = "%s,%s,%s" % (jrn, vol, page.split('-')[0])
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
req.status = apache.HTTP_NOT_FOUND
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
req.status = apache.HTTP_NOT_FOUND
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
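# Minimal sketch of how the three stages above compose (comments only; the
# pattern and field are hypothetical):
#
#     hits = search_pattern(p='ellis muon', f='title', m='w', of='id')
#
# Stage 1 splits the pattern into basic units, roughly
# [['+', 'ellis', 'title', 'w'], ['+', 'muon', 'title', 'w']]; stage 2 resolves
# each unit to a hitset via search_unit(); stage 3 folds the hitsets together
# with the +/-/| set operations, starting from the full record universe.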
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching for 0 results.
# Doesn't work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
# If searching with parentheses fails, perform search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
# remove the parentheses in the query. Current implementation removes all the parentheses,
# but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, wl)
## look up runtime synonyms, if applicable:
hitset_synonyms = intbitset()
if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f or 'anyfield'):
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
if p_synonym.lower() != p.lower() and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'earliestdate':
hitset = search_unit_in_bibrec(p, p, 'e')
elif f == 'refersto':
# we are searching for records referred to by the records matching 'p'
hitset = search_unit_refersto(p)
elif f == 'referstoexcludingselfcites':
# same as above, but excluding self-citations
hitset = search_unit_refersto_excluding_selfcites(p)
elif f == 'cataloguer':
# we are doing search by the cataloguer nickname
hitset = search_unit_in_record_history(p)
elif f == 'rawref':
from invenio.refextract_api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are searching for records cited by the records matching 'p'
hitset = search_unit_citedby(p)
elif f == 'citedbyexcludingselfcites':
# same as above, but excluding self-citations
hitset = search_unit_citedby_excluding_selfcites(p)
elif m == 'a' or m == 'r' or f == 'subject' or (f == 'journal' and m == 'e'):
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
#for exact match on the admin configured fields we are searching in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
elif p.startswith("citedexcludingselfcites:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[24:], exclude_selfcites=True)
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
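# Illustrative examples of the dispatch above (comments only; field and
# pattern values are hypothetical):
#
#     search_unit('higgs', 'title', 'w')               # word index lookup
#     search_unit('ellis, j', 'author', 'a')           # phrase/pair index lookup
#     search_unit('2012-01->2012-12', 'datecreated')   # bibrec date span
#
# Synonym and CJK hitsets, when applicable, are OR-ed into the result at the end.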
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in CFG_WEBSEARCH_IDXPAIRS_FIELDS]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
unicode_520 = (is_index_using_unicode_520(index_id) and
"COLLATE 'utf8_unicode_520_ci'" or "")
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = word.replace('*', '%') # we now use '*' as the truncation character
words = word.split("->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
# We remove trailing truncation character before stemming
if word0.endswith('%'):
word0 = stem(word0[:-1], stemming_language) + '%'
else:
word0 = stem(word0, stemming_language)
if word1.endswith('%'):
word1 = stem(word1[:-1], stemming_language) + '%'
else:
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s %s" % (bibwordsX, unicode_520),
(word0_washed, word1_washed), wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
# We remove trailing truncation character before stemming
if word.endswith('%'):
word = stem(word[:-1], stemming_language) + '%'
else:
word = stem(word, stemming_language)
if word.find('%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s %s" % (bibwordsX, unicode_520),
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
washedword = ''
if f == 'journal':
washedword = wash_index_term(word, 255)
else:
washedword = wash_index_term(word)
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(washedword,))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibwrd)
else:
hitset = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
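# Sketch of the query shapes handled above (comments only; values are
# hypothetical):
#
#     search_unit_in_bibwords('ellis', 'author')          # exact term lookup
#     search_unit_in_bibwords('ell*', 'author', wl=100)   # '*' -> '%' LIKE query,
#                                                         # capped by the wildcard limit
#     search_unit_in_bibwords('muon->muons', 'title')     # span query, BETWEEN lookup
#
# When the wildcard limit is hit, InvenioWebSearchWildcardLimitError carries
# the partial hitset so callers can still show partial results.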
def search_unit_in_idxpairs(p, f, search_type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
unicode_520 = (is_index_using_unicode_520(index_id) and
"COLLATE 'utf8_unicode_520_ci'" or "")
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = p.split("->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
elif len(pairs_left) != len(pairs_right):
# it is kind of hard to know what the user actually wanted
# we have to do: foo bar baz -> qux xyz, so let's switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
for pair in pairs_left[:-1]:  # which should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif p.find('%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
replacement = 'xxxxxxxxxx' # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s %s"
% (idxpair_table_washed, query_addons, unicode_520), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s %s"
% (idxpair_table_washed, query_addons, unicode_520), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
def search_unit_in_idxphrases(p, f, search_type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing whether to limit the query results or not
# deduce in which idxPHRASE table we will search:
index_id = get_index_id_from_field(f or "anyfield")
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
unicode_520 = (is_index_using_unicode_520(index_id) and
"COLLATE 'utf8_unicode_520_ci'" or "")
# detect query type (exact phrase, partial phrase, regexp):
if search_type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = p.replace('*', '%') # we now use '*' as the truncation character
ps = p.split("->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if p.find('%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s %s" % (idxphraseX, query_addons, unicode_520),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s %s" % (idxphraseX, query_addons, unicode_520), query_params)
# fill the result set:
for dummy_word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibphrase)
else:
hitset = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing whether to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError, excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
hitset = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
return hitset
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""
Return hitset of recIDs whose creation, modification or earliest date
(according to 'search_type' being 'c', 'm' or 'e') falls between datetext1 and datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
hitset = intbitset()
if search_type and search_type.startswith("m"):
search_type = "modification_date"
elif search_type and search_type.startswith("e"):
search_type = "earliest_date"
else:
search_type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
(datetext1, datetext2))
for row in res:
hitset += row[0]
return hitset
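# Worked example for the date handling above (comments only; dates are
# hypothetical):
#
#     search_unit_in_bibrec('2004-07-01', '2004-07-01', 'c')
#     # -> records whose creation_date starts with '2004-07-01' (LIKE query)
#
#     search_unit_in_bibrec('2004-07-01 00:00:00', '2004-07-31 23:59:59', 'm')
#     # -> records modified within that closed interval (range query)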
def search_unit_by_times_cited(p, exclude_selfcites=False):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
return get_records_with_num_cites(numstr,
exclude_selfcites=exclude_selfcites)
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records.
"""
if query:
ahitset = search_pattern(p=query)
res = get_refersto_hitset(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_REFERSTO:
raise InvenioWebSearchReferstoLimitError(res)
return res
else:
return intbitset([])
def search_unit_refersto_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records referred to by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citers = intbitset()
citations = get_cited_by_list(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
selfcitations = get_self_cited_by_list(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_REFERSTO)
for cites, selfcites in zip(citations, selfcitations):
# cites is in the form [(citee, citers), ...]
citers += cites[1] - selfcites[1]
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_REFERSTO:
raise InvenioWebSearchReferstoLimitError(citers)
return citers
else:
return intbitset([])
def search_unit_in_record_history(query):
"""
Return hitset of recIDs that were modified by the given cataloguer
"""
if query:
try:
cataloguer_name, modification_date = query.split(":")
except ValueError:
cataloguer_name = query
modification_date = ""
if modification_date:
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
modification_date = spires_syntax_converter.convert_date(modification_date)
parts = modification_date.split('->', 1)
if len(parts) > 1:
start_date, end_date = parts
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
(cataloguer_name, start_date, end_date))
else:
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
(cataloguer_name, modification_date + '%',))
return intbitset(res)
else:
sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
res = intbitset(run_sql(sql, (cataloguer_name,)))
return res
else:
return intbitset([])
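# Examples of the accepted query shapes (comments only; the cataloguer
# nickname 'jdoe' is hypothetical):
#
#     search_unit_in_record_history('jdoe')                      # all records touched by jdoe
#     search_unit_in_record_history('jdoe:2012-06')              # records touched in June 2012
#     search_unit_in_record_history('jdoe:2012-01->2012-06-30')  # records touched in a date span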
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
res = get_citedby_hitset(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_CITEDBY:
raise InvenioWebSearchCitedbyLimitError(res)
return res
else:
return intbitset([])
else:
return intbitset([])
def search_unit_citedby_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citees = intbitset()
references = get_refers_to_list(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
selfreferences = get_self_refers_to_list(ahitset, input_limit=CFG_WEBSEARCH_MAX_RECORDS_CITEDBY)
for refs, selfrefs in zip(references, selfreferences):
# refs is in the form [(citer, citees), ...]
citees += refs[1] - selfrefs[1]
if len(ahitset) >= CFG_WEBSEARCH_MAX_RECORDS_CITEDBY:
raise InvenioWebSearchCitedbyLimitError(citees)
return citees
else:
return intbitset([])
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
records_that_can_be_displayed = intbitset()
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
permitted_restricted_collections = []
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
for coll in colls:
if collection_restricted_p(coll) and (acc_authorize_action(user_info, 'viewrestrcoll', collection=coll)[0] == 0):
permitted_restricted_collections.append(coll)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
if verbose and of.startswith("h"):
write_warning("Search stage 4: Your permitted collections: %s" % (str(permitted_restricted_collections),), req=req)
        # let's build the list of both public and restricted
        # child collections of the collection from which the user
        # started his/her search. This list of children colls will be
        # used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except (AttributeError, KeyError):
current_coll = CFG_SITE_NAME
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if verbose and of.startswith("h"):
write_warning("Search stage 4: Collections to display: %s" % (str(colls_to_be_displayed),), req=req)
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
#we need this to be able to remove records that are both in a public and restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
        else:# the user needs to have access to all collections that restrict a record
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if verbose and of.startswith("h"):
write_warning("Search stage 4: Final results (%d): %s " % (results_nbhits, str(results),), req=req)
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
        # in this way, we do not count twice records that are both in the Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
{'x_collection': '<em>' +
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
                # display the whole list of collections in a comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
            # no hits found: either the user is looking for a document they do not have the rights to see,
            # or the user is looking for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
       If the final set would be empty and 'ap'
       (approximate pattern) is true, then print the 'aptext' warning
       and return the original 'results' set unchanged. If 'ap' is
       false, then return an empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
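# Illustrative usage sketch (not part of the original module; collection names and
# recIDs below are hypothetical) showing the 'ap' (approximate pattern) fallback of
# intersect_results_with_hitset():
#
#   results = {'Articles': intbitset([1, 2, 3]), 'Preprints': intbitset([4, 5])}
#   intersect_results_with_hitset(None, results, intbitset([2, 3]), ap=0, of="xm")
#   # -> roughly {'Articles': intbitset([2, 3]), 'Preprints': intbitset([])}
#   intersect_results_with_hitset(None, results, intbitset([99]), ap=1, of="xm")
#   # -> a deep copy of the original 'results', because every per-collection
#   #    intersection came out empty and 'ap' is true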
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
       author searches for similar names. Namely, take AUTHOR_NAME
       and the first initial of the first name (after the comma) and look
       into the author index to see whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:] == '%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
       Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto' or f == 'referstoexcludingselfcites':
return _("There are no records referring to %s.") % cgi.escape(p)
if f == 'cataloguer':
return _("There are no records modified by %s.") % cgi.escape(p)
if f == 'citedby' or f == 'citedbyexcludingselfcites':
return _("There are no records cited by %s.") % cgi.escape(p)
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f in ('datecreated', 'datemodified', 'earliestdate'):
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %s.") % \
('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f in ('datecreated', 'datemodified', 'earliestdate'):
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
                    # p was stripped of accents, so do the same:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \
("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>")
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
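# Illustrative sketch (terms below are made up, not from any real index): the result of
# get_nearest_terms_in_bibwords() is always sorted ascendingly and contains `p' itself,
# preceded by up to n_above smaller index terms and followed by up to n_below larger ones:
#
#   get_nearest_terms_in_bibwords("ellis", "author", n_below=2, n_above=2)
#   # -> e.g. ['einstein', 'eliot', 'ellis', 'euler', 'fermi']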
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = [x[0] for x in res_above]
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = [x[0] for x in res_below]
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
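# Illustrative sketch (index id, phrases and hit counts below are hypothetical): unlike
# get_nearest_terms_in_idxphrase(), the *_with_collection variant over-fetches 3*n
# candidate phrases on each side, keeps only those with hits inside the given collection,
# and returns (phrase, number_of_hits_in_collection) pairs:
#
#   coll = get_collection_reclist('Articles')   # 'Articles' is a made-up collection name
#   get_nearest_terms_in_idxphrase_with_collection('higgs boson', 15, 2, 2, coll)
#   # -> e.g. [('higgs', 12), ('higgs boson', 40), ('higgs field', 3)]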
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_idxphrase(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
    ## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts with two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# select first n words only: (this is needed as we were searching
# in many different tables and so aren't sure we have more than n
# words right; this of course won't be needed when we shall have
# one ACC table only for given field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except ValueError:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
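# Illustrative sketch (author names below are made up): the browse window returned by
# get_nearest_terms_in_bibxxx() always contains `p' itself, with up to n_above phrases
# before it; note that `p' counts towards the n_below part of the window:
#
#   get_nearest_terms_in_bibxxx("Ellis, J", "author", n_below=2, n_above=1)
#   # -> e.g. ['Ellis, D', 'Ellis, J', 'Ellis, N']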
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified or earliestdate.
    Note: the below/above count is only approximate, not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
elif f == 'earliestdate':
col = 'earliest_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
return list(out_list)
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified' or 'earliestdate'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
elif f == 'earliestdate':
col = 'earliest_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_idxphrases(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts with two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
            #check if this collection actually exists (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
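# Illustrative sketch (recID and collection name are hypothetical) of how the referer
# influences guess_collection_of_a_record():
#
#   guess_collection_of_a_record(1234)
#   # -> same as guess_primary_collection_of_a_record(1234)
#   guess_collection_of_a_record(1234, referer=CFG_SITE_URL + '/collection/Preprints')
#   # -> 'Preprints', but only if such a collection exists and recID 1234 is in its
#   #    reclist; otherwise the primary-collection guess is returned.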
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
       Example: input='100__%', output='first author'.
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]), "\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
tag = tag.strip()
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
                else:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s "\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
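# Illustrative sketch (record content is hypothetical): in "case B" the function emits
# one line per field instance in the text-MARC style
# "<recID> <tag+indicators> $$<subfield><value>", e.g. a request for tag '100' on
# record 1234 might return something like:
#
#   get_fieldvalues_alephseq_like(1234, ['100'])
#   # -> '000001234 100__ $$aEllis, J$$uCERN'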
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
return bibrecord.record_empty(get_record(recID))
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_earliest_date(recID, fmt="%Y-%m-%d"):
"Returns the earliest date for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(earliest_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
       If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
       If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
Cache holding all structures created by bibsort
( _data, data_dict).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except IndexError:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except IndexError:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
res = run_sql("""SELECT m.name, m.definition
FROM bsrMETHOD m, bsrMETHODDATA md
WHERE m.id = md.id_bsrMETHOD""")
return dict(res)
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
CACHE_SORTED_DATA[sorting_method] = None
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and (len(sort_field) > 1 and str(sort_field[0:2]).isdigit()):
            # sort_field starts with two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
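# Illustrative sketch (the MARC tags shown for 'author' are hypothetical): the second
# element of the tuple returned by get_tags_from_sort_fields() names the first sort
# field for which no tags could be found, so callers can warn the user about it:
#
#   get_tags_from_sort_fields(['author', '773__c'])
#   # -> e.g. (['100__a', '700__a', '773__c'], '')
#   get_tags_from_sort_fields(['nosuchfield'])
#   # -> ([], 'nosuchfield')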
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
       (ii) rank_method_code is not in bsrMETHOD, use bibrank;
"""
# Special case: sorting by citations is fast because we store the
# ranking dictionary in memory, so we do not use bibsort buckets.
if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
solution_recs, solution_scores = \
sort_records_bibsort(req, hitset_global, sort_method,
'', sort_order, verbose, of, ln,
rg, jrec, 'r')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return solution_recs, solution_scores, '(', ')', comment
if rank_method_code.lower() == 'citation':
related_to = []
else:
related_to = pattern
solution_recs, solution_scores, prefix, suffix, comment = \
rank_records_bibrank(rank_method_code=rank_method_code,
rank_limit_relevance=rank_limit_relevance,
hitset=hitset_global,
related_to=related_to,
verbose=verbose,
field=field,
rg=rg,
jrec=jrec)
# Solution recs can be None, in case of error or other cases
    # which should all be changed to return an empty list.
if solution_recs and sort_order == 'd':
solution_recs.reverse()
solution_scores.reverse()
return solution_recs, solution_scores, prefix, suffix, comment
def sort_records_latest(recIDs, jrec, rg, sort_order):
if sort_order == 'd':
recIDs.reverse()
return slice_records(recIDs, jrec, rg)
def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
    (ii) sort_field is not in bsrMETHOD, and thus, the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
#bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
    #if BibSort buckets cannot be used (e.g. BibSort is disabled), fall back to old-fashioned sorting
use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
return sort_records_latest(recIDs, jrec, rg, sort_order)
sort_fields = sort_field.split(",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and
definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return slice_records(recIDs, jrec, rg)
elif tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:', '').strip().split(',') == tags \
and use_sorting_buckets:
                #this list of tags has a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
        #we do not have this sort_field in BibSort tables -> do the old-fashioned sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
else:
return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
if not jrec:
jrec = 1
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset()
input_recids = intbitset(recIDs)
if CACHE_SORTED_DATA[sort_method] is None:
CACHE_SORTED_DATA[sort_method] = BibSortDataCacher(sort_method)
CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
sort_cache = CACHE_SORTED_DATA[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field,
sort_order, '', verbose, of, ln, rg,
jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(input_recids & sort_cache['bucket_data'][bucket_no])
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = intbitset()
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
#recid is in buckets, but not in the bsrMETHODDATA,
#maybe because the value has been deleted, but the change has not yet been propagated to the buckets
missing_records.add(recid)
#check if there are recids that are not in any bucket -> to be added at the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records += input_recids - solution
#the records need to be sorted in reverse order for the print record function
#the return statement should be equivalent with the following statements
    #(these are clearer, but less efficient, since they reverse the same list twice)
#sorted_solution = (missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='d'))[:irec_max]
#sorted_solution.reverse()
#return sorted_solution
reverse = sort_order == 'd'
if sort_method.strip().lower().startswith('latest') and reverse:
# If we want to sort the records on their insertion date, add the missing records at the top
solution = sorted(dict_solution, key=dict_solution.__getitem__, reverse=True) + sorted(missing_records, reverse=True)
else:
solution = sorted(missing_records) + sorted(dict_solution, key=dict_solution.__getitem__, reverse=reverse)
    # Only keep the records we are going to display
index_min = jrec - 1
if rg:
index_max = index_min + rg
solution = solution[index_min:index_max]
else:
solution = solution[index_min:]
if sort_or_rank == 'r':
# We need the recids, with their ranking score
return solution, [dict_solution.get(record, 0) for record in solution]
else:
return solution
def slice_records(recIDs, jrec, rg):
if not jrec:
jrec = 1
if rg:
recIDs = recIDs[jrec-1:jrec-1+rg]
else:
recIDs = recIDs[jrec-1:]
return recIDs
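# Illustrative sketch of the jrec/rg paging convention implemented by slice_records()
# (jrec is 1-based, rg is the page size, rg=None means "up to the end"):
#
#   slice_records([10, 20, 30, 40, 50], jrec=2, rg=2)     # -> [20, 30]
#   slice_records([10, 20, 30, 40, 50], jrec=4, rg=None)  # -> [40, 50]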
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
## check arguments:
if not sort_field:
return slice_records(recIDs, jrec, rg)
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %d records only. Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning", req=req)
return slice_records(recIDs, jrec, rg)
recIDs_dict = {}
recIDs_out = []
if not tags:
        # tags have not been computed yet
sort_fields = sort_field.split(',')
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error", req=req)
return slice_records(recIDs, jrec, rg)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not present, so add other vals after spaces
val = sort_pattern + " " + ''.join(vals)
else:
# no sort pattern defined, so join them all together
val = ''.join(vals)
# sort values regardless of accents and case
val = translate_to_ascii(val).pop().lower()
if val in recIDs_dict:
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# create output array:
for k in sorted(recIDs_dict.keys()):
recIDs_out.extend(recIDs_dict[k])
# ascending or descending?
if sort_order == 'd':
recIDs_out.reverse()
recIDs = recIDs_out
# return only up to the maximum that we need
return slice_records(recIDs, jrec, rg)
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
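# Illustrative worked example for get_interval_for_records_to_sort(): with nb_found=100
# hits, jrec=21 and rg=10 the records at 0-based positions 20..29 are the ones to sort;
# rg=None extends the interval to the end, and rg=-9999 covers everything:
#
#   get_interval_for_records_to_sort(100, jrec=21, rg=10)    # -> (20, 30)
#   get_interval_for_records_to_sort(100, jrec=21, rg=None)  # -> (20, 100)
#   get_interval_for_records_to_sort(100, jrec=1, rg=-9999)  # -> (0, 100)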
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
    'print_records_prologue_p' and/or 'print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %d replaces it." % merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(recid)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
                        selfcited = rank_by_citations(get_self_cited_by(recid), verbose=verbose)
selfcited = reversed(selfcited[0])
# recid is already used, let's use recordid
selfcited = [recordid for recordid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
if verbose > 3:
write_warning("Citation log debug: %s" % len(entries), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.bibclassify_webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates,
websearch_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'data':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio import hepdatautils
from invenio import hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
earliestdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
earliestdate = get_earliest_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
earliestdate=earliestdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
earliestdate=earliestdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.webcomment import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
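# Illustrative use of the prologue/epilogue pair (a sketch only, assuming a
# mod_python-like `req` object and a list of existing record IDs `recids`;
# see print_records() above for the real call sites):
#
#   print_records_prologue(req, 'xm')
#   for recid in recids:
#       req.write(print_record(recid, 'xm'))
#   print_records_epilogue(req, 'xm')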
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
val = value[0][0]
except IndexError:
### In case it does not exist, let's build it!
pass
else:
return deserialize_via_marshal(val)
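    # fall back to rebuilding the record structure from its MARCXML
    # representation when no serialized 'recstruct' is available: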
return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d', sp='', rm=''):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
    # check, from the user information, whether the user has the right to see
    # hidden fields/tags in the records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if format == 'recjson':
import json
from invenio.bibfield import get_record as get_recjson
from invenio.bibfield_utils import SmartDict
recjson = get_recjson(recID)
record = SmartDict()
keys = ot or recjson.keys()
for key in keys:
if key == 'bibdocs':
continue
if not can_see_hidden and key in CFG_BIBFORMAT_HIDDEN_RECJSON_FIELDS:
continue
try:
record[key] = recjson.get(key)
except IndexError:
pass
# skipkeys is True to skip e.g. the bibdocs key, which is a non
# primitive object.
return json.dumps(dict(record), skipkeys=True)
_ = gettext_set_language(ln)
# The 'attribute this paper' link is shown only if the session states it should and
# the record is included in the collections to which bibauthorid is limited.
if user_info:
display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
(not BIBAUTHORID_LIMIT_TO_COLLECTIONS or
recID in intbitset.union(*[get_collection_reclist(x)
for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS])))
else:
display_claim_this_paper = False
can_edit_record = False
if check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (format.lower().startswith('t')
or format.lower().startswith('hm')
or str(format[0:3]).isdigit()
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
# Other format have to deal with it.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %d replaces it." % merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
return out
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
record = get_record(recID)
if not can_see_hidden:
for tag in CFG_BIBFORMAT_HIDDEN_TAGS:
record.pop(tag, None)
ot = list(set(ot) - set(CFG_BIBFORMAT_HIDDEN_TAGS))
out += record_xml_output(record, ot)
else:
            # record 'recID' is not pre-formatted in 'format' (or it has been
            # deleted) -- it cannot be served from the "bibfmt" table, so
            # build the output from the "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
                      # bibrec_bib01x (and set i = 0 at the end of
# first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
i = 0 # Next loop should start looking at bib%0 and bibrec_bib00x
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
                if f.split('.')[-1] == 'png':
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly, or use the default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.bibformat_utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
                    # remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except IndexError:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
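    # For example (a sketch):
    #   clean_dictionary({'p1': 'ellis', 'p2': 'muon'}, ['p2'])
    #   -> {'p1': 'ellis', 'p2': ''}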
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
               or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
          d1 - first datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
          d2 - second datetime in full YYYY-mm-dd HH:MM:SS format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
          wl - wildcard limit (e.g. 100): wildcard queries will be
               limited to at most 100 results.
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
    # wash the collection arguments separately, because this step can also be called independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
if ('Jobs' in kwargs['colls_to_display'] or 'Jobs' in kwargs['colls_to_search']) and kwargs['req']:
if kwargs['recid'] and kwargs['recid'] > 0:
return redirect_to_url(kwargs['req'], 'https://inspirehep.net/jobs/{0}'.format(kwargs['recid']),
apache.HTTP_MOVED_PERMANENTLY)
else:
return redirect_to_url(kwargs['req'], 'https://inspirehep.net/jobs',
apache.HTTP_MOVED_PERMANENTLY)
if ('Conferences' in kwargs['colls_to_display'] or 'Conferences' in kwargs['colls_to_search']) and kwargs['req']:
if kwargs['recid'] and kwargs['recid'] > 0:
return redirect_to_url(kwargs['req'], 'https://inspirehep.net/conferences/{0}'.format(kwargs['recid']),
apache.HTTP_MOVED_PERMANENTLY)
else:
return redirect_to_url(kwargs['req'], 'https://inspirehep.net/conferences',
apache.HTTP_MOVED_PERMANENTLY)
return prs_search(kwargs=kwargs, **kwargs)
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
If there are troubles, e.g. a collection is not defined, print
warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError, exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Sets the (default) values and checks others for the PRS call
"""
# wash output format:
of = wash_output_format(of, verbose=verbose, req=req)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
    # TODO: deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldcode in fieldargs:
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos': None,
}
kwargs.update(**dummy)
return kwargs
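# The kwargs dictionary returned above is what prs_perform_search() and the
# other prs_* helpers below receive and unpack via **kwargs.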
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None, rm=None, of=None, em=None,
**dummy):
"""
    This function writes various bits into the req object as the search
    proceeds (so that pieces of the page are rendered even before the
    search has ended).
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
        ## 3-terter - co-cited-with search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
        if of not in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and not req.header_only:
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
search_pattern=p, verbose=verbose, tab=tab, sf=sf,
so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
req.status = apache.HTTP_NOT_FOUND
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
if req and not req.header_only:
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recid = p[6:]
if record_exists(recid) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
req.status = apache.HTTP_NOT_FOUND
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # the record does exist, so find records similar to it
t1 = os.times()[4]
(results_similar_recIDs,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
results_similar_comments) = \
rank_records_bibrank(rank_method_code=rm,
rank_limit_relevance=0,
hitset=get_collection_reclist(cc),
related_to=[p],
verbose=verbose,
field=f,
rg=rg,
jrec=jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
req.status = apache.HTTP_NOT_FOUND
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # the record does exist, so find records co-cited with it:
t1 = os.times()[4]
results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
else:
            # the co-cited-with search returned no results:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] is None or result[1] is False:
                    # these are the searches that returned no or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
    # we need to remember for later use whether there were any hosted collections to be searched, even if in the end they were not
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
if query_representation_in_cache in search_results_cache.cache:
        # the query is already in the cache, so reuse the cached results:
results_in_any_collection.union_update(search_results_cache.cache[query_representation_in_cache])
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
kwargs=None, colls_to_search=None,
req=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None,
em=None, **dummy):
    display_nearest_terms_box = not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
        # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE and query_representation_in_cache not in search_results_cache.cache:
if len(search_results_cache.cache) > CFG_WEBSEARCH_SEARCH_CACHE_SIZE:
search_results_cache.clear()
search_results_cache.cache[query_representation_in_cache] = results_in_any_collection
if verbose and of.startswith("h"):
write_warning(req, "Search stage 3: storing query results in cache.", req=req)
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
if len(results_final.keys()) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
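# For illustration, with hypothetical per-collection hit sets
# results_final = {'Articles': intbitset([1, 2, 3]), 'Preprints': intbitset([3, 4])}
# the per-collection counts are 3 and 2, but the union {1, 2, 3, 4} gives
# results_final_nb_total == 4, because record 3 is counted only once.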
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
# feed the current search to be summarized:
from invenio.search_engine_summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g. fi='author', pi='ellis', oi='a'
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
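# For illustration, a hypothetical advanced query with p1='higgs boson',
# f1='title', op1='a', p2='ellis', f2='author' and p3='' is re-assembled by
# the loop above into
# search_p == 'title:"higgs boson" and author:ellis'
# (the empty p3 clears f3 and the operator trailing p2).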
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = slice_records(results_final_recIDs_ranked, jrec, rg)
results_final_relevances = slice_records(results_final_relevances, jrec, rg)
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
# log query:
try:
id_query = log_query(req.remote_host, req.args, uid)
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if not of in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
user_info = collect_user_info(req)
display_email_alert_part = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not user_info['precached_usealerts']:
display_email_alert_part = False
req.write(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query,
ln=ln, display_email_alert_part=display_email_alert_part))
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
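# The try/except below is a NameError guard: if the module-level name
# loaded_websearch_services is not defined yet, it is initialised once via
# get_search_services(); presumably this avoids repeating the (possibly
# expensive) service discovery if the module is re-imported or reloaded.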
try:
loaded_websearch_services is not None
except Exception:
loaded_websearch_services = get_search_services()
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
query_representation_in_cache = repr((p, f, colls_to_search, wl))
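# For illustration, a hypothetical query p='ellis', f='author',
# colls_to_search=['Articles'], wl=None would be cached under the key
# "('ellis', 'author', ['Articles'], None)"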
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
# WebSearch services
if jrec <= 1 and \
(em == "" and True or (EM_REPOSITORY["search_services"] in em)):
user_info = collect_user_info(req)
# display only on first search page, and only if wanted
# when 'em' param set.
if p:
search_units = create_basic_search_units(req, p, f)
else:
search_units = []
search_service_answers = [search_service.answer(req, user_info, of, cc, colls_to_search, p, f, search_units, ln) \
for search_service in loaded_websearch_services]
search_service_answers.sort(reverse=True)
nb_answers = 0
best_relevance = None
for answer_relevance, answer_html in search_service_answers:
nb_answers += 1
if best_relevance is None:
best_relevance = answer_relevance
if best_relevance <= CFG_WEBSEARCH_SERVICE_MIN_RELEVANCE_TO_DISPLAY:
# The answer is not relevant enough
if verbose > 8:
write_warning("Service relevance too low (%i). Answer would be: %s" % (answer_relevance, answer_html), req=req)
break
if nb_answers > CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY:
# We have reached the max number of service to display
if verbose > 8:
write_warning("Max number of services (%i) reached." % CFG_WEBSEARCH_SERVICE_MAX_NB_SERVICE_DISPLAY, req=req)
break
if best_relevance - answer_relevance > CFG_WEBSEARCH_SERVICE_MAX_RELEVANCE_DIFFERENCE:
# The service gave an answer that is way less good than previous ones.
if verbose > 8:
write_warning("Service relevance too low (%i) compared to best one (%i). Answer would be: %s" % (answer_relevance, best_relevance, answer_html), req=req)
break
req.write('<div class="searchservicebox">')
req.write(answer_html)
if verbose > 8:
write_warning("Service relevance: %i" % answer_relevance, req=req)
req.write('</div>')
if answer_relevance == CFG_WEBSEARCH_SERVICE_MAX_SERVICE_ANSWER_RELEVANCE:
# The service assumes it has given the definitive answer
if verbose > 8:
write_warning("There cannot be a better answer. Leaving", req=req)
break
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 1 or (p1 or p2 or p3):
## 3A - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3B - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if of == 'recjson':
req.write('[]\n')
return None
# store this search query results into search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None,
verbose=None, **dummy):
# search stage 4: intersection with collection universe:
if verbose and of.startswith("h"):
write_warning("Search stage 4: Starting with %s hits." % str(results_in_any_collection), req=req)
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
if verbose and of.startswith("h"):
write_warning("Search stage 5: Starting with %s hits." % str(results_final), req=req)
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, jrec=0, rg=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
if rg:
return recIDs[jrec:jrec+rg]
else:
return recIDs[jrec:]
elif of.startswith("h"):
if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
from invenio.search_engine_cvifier import cvify_records
if CFG_BIBSORT_ENABLED:
results_final_for_all_selected_colls = sort_records(
req, results_final_for_all_selected_colls,
sort_field=sf, sort_order='a', of=of, ln=ln)
cvify_records(results_final_for_all_selected_colls, of, req, so)
else:
prs_print_records(kwargs=kwargs, **kwargs)
prs_log_query(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
# clear cache if requested:
if action == "clear":
search_results_cache.clear()
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show search results cache:
out = "<h3>Search Cache</h3>"
out += "- search cache usage: %d queries cached (max. ~%d)" % \
(len(search_results_cache.cache), CFG_WEBSEARCH_SEARCH_CACHE_SIZE)
if len(search_results_cache.cache):
out += "<br />- search cache contents:"
out += "<blockquote>"
for query, hitset in search_results_cache.cache.items():
out += "<br />%s ... %s" % (query, hitset)
out += """<p><a href="%s/search/cache?action=clear">clear search results cache</a>""" % CFG_SITE_URL
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
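# Each search.log line is expected to look like (hypothetical values):
# 20060624153045#0#ellis#author#HEP#1234
# i.e. a YYYYMMDDhhmmss timestamp, advanced-search flag, pattern, field,
# collection and number of hits, separated by '#'.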
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = line.split("#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
pass # ignore eventual wrong log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
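# For illustration, get_all_field_values('909C0b') maps to table 'bib90x'
# ('bib%02dx' % 90) and effectively runs:
# SELECT DISTINCT(value) FROM bib90x WHERE tag='909C0b'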
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, str):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in valuefreqdict.iteritems():
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
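# numpy.lexsort treats its *last* key as the primary one, so lexsort([ln, f])
# sorts primarily by f (negated frequencies, ascending == descending frequency)
# and breaks ties alphabetically on the lowercased name. For illustration,
# hypothetical counts {'PREPRINT': 10, 'Thesis': 7, 'ARTICLE': 7} come out as
# [('PREPRINT', 10), ('ARTICLE', 7), ('Thesis', 7)].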
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile as pyprofile
import pstats
pyprofile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
def check_user_can_edit_record(req, recid):
""" Check if user has authorization to modify a collection
the recid belongs to
"""
record_collections = get_all_collections_of_a_record(recid)
if not record_collections:
# Check if user has access to all collections
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection='')
if auth_code == 0:
return True
else:
for collection in record_collections:
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection=collection)
if auth_code == 0:
return True
return False
| gpl-2.0 |
numenta/nupic.research | packages/dendrites/tests/unit/dendritric_layer_test.py | 3 | 36012 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import torch
from nupic.research.frameworks.dendrites import (
AbsoluteMaxGatingDendriticLayer,
AbsoluteMaxGatingDendriticLayer2d,
BiasingDendriticLayer,
DendriteSegments,
GatingDendriticLayer,
GatingDendriticLayer2d,
)
from nupic.research.frameworks.pytorch.model_utils import count_nonzero_params
class DendriteSegmentsTests(unittest.TestCase):
def test_forward(self):
"""Validate shape of forward output."""
dendrite_segments = DendriteSegments(
num_units=10, num_segments=20, dim_context=15, sparsity=0.7, bias=True
)
dendrite_segments.rezero_weights()
batch_size = 8
context = torch.rand(batch_size, dendrite_segments.dim_context)
out = dendrite_segments(context)
self.assertEqual(out.shape, (8, 10, 20))
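# Each output element is a per-unit, per-segment linear map of the context:
# out[b, u, s] == weights[u, s, :] @ context[b, :] + biases[u, s]
# (test_equivalent_forward below verifies this unit by unit).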
def test_sparsity(self):
"""
Validate that sparsity is enforced per unit per segment.
"""
sparsity = 9 / 15
dendrite_segments = DendriteSegments(
num_units=10, num_segments=20, dim_context=15, sparsity=sparsity, bias=True
)
weights = dendrite_segments.weights.data
weights[:] = 1
dendrite_segments.rezero_weights()
for unit in range(dendrite_segments.num_units):
for segment in range(dendrite_segments.num_segments):
w = weights[unit, segment, :]
num_off = (weights[unit, segment, :] == 0).sum().item()
actual_sparsity = num_off / w.numel()
self.assertEqual(
sparsity,
actual_sparsity,
f"Sparsity {actual_sparsity} != {sparsity}"
f"for unit {unit} and segment {segment}",
)
def test_equivalent_forward(self):
"""
Validate output with respect to an equivalent operation:
applying the dendrite segments one-by-one for each unit.
"""
dendrite_segments = DendriteSegments(
num_units=10, num_segments=20, dim_context=15, sparsity=0.7, bias=True
)
batch_size = 8
context = torch.rand(batch_size, dendrite_segments.dim_context)
out = dendrite_segments(context) # shape batch_size x num_units x num_segments
weights = dendrite_segments.weights
biases = dendrite_segments.biases
for unit in range(dendrite_segments.num_units):
unit_weight = weights[unit, ...]
unit_bias = biases[unit, ...]
expected_out = torch.nn.functional.linear(context, unit_weight, unit_bias)
actual_out = out[:, unit, :]
same_out = torch.allclose(actual_out, expected_out, atol=1e-7)
self.assertTrue(
same_out,
f"Didn't observe the expected output for unit {unit}: "
f"actual_out - expected_out = {actual_out - expected_out}",
)
class BiasingDendriticLayerTests(unittest.TestCase):
def test_forward_output_shape(self):
"""Validate shape of forward output."""
# Dendritic weights as a bias.
linear = torch.nn.Linear(10, 10)
dendrite_layer = BiasingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=True,
)
dendrite_layer.rezero_weights()
batch_size = 8
input_dim = dendrite_layer.module.weight.shape[1]
context_dim = dendrite_layer.segments.weights.shape[2]
x = torch.rand(batch_size, input_dim)
context = torch.rand(batch_size, context_dim)
out = dendrite_layer(x, context)
self.assertEqual(out.shape, (8, 10))
def test_forward_output_values(self):
"""
Test all parts of the forward pass of a biasing dendritic layer from end to end.
"""
# Dendritic weights as a bias.
linear = torch.nn.Linear(4, 4, bias=False)
dendrite_layer = BiasingDendriticLayer(
module=linear,
num_segments=3,
dim_context=4,
module_sparsity=0.7,
dendrite_sparsity=0.7,
dendrite_bias=False,
)
dendrite_layer.rezero_weights()
linear.weight.data[:] = torch.tensor(
[
[-0.04, 0.00, 0.00, 0.00],
[0.00, 0.00, 0.00, -0.26],
[0.00, 0.00, 0.00, -0.13],
[0.00, 0.00, 0.00, 0.41],
],
requires_grad=True,
)
dendrite_layer.segments.weights.data[:] = torch.tensor(
[
[
[-0.26, 0.00, 0.00, 0.00],
[0.09, 0.00, 0.00, 0.00],
[-0.34, 0.00, 0.00, 0.00],
],
[
[0.00, 0.00, 0.00, 0.36],
[0.00, 0.00, 0.00, -0.32],
[0.00, 0.00, 0.00, 0.41],
],
[
[0.00, 0.00, 0.00, 0.18],
[0.00, 0.00, 0.38, 0.00],
[0.00, 0.00, 0.23, 0.00],
],
[
[0.00, 0.00, 0.00, 0.23],
[-0.30, 0.00, 0.00, 0.00],
[0.00, 0.00, -0.24, 0.00],
],
],
requires_grad=True,
)
# Pseudo input: batch_size=2, input_dim=4
x = torch.tensor([[0.79, 0.36, 0.47, 0.30], [0.55, 0.64, 0.50, 0.50]])
# Pseudo input: batch_size=2, context_dim=4
context = torch.tensor([[0.84, 0.63, 0.67, 0.42], [0.30, 0.07, 0.52, 0.15]])
# Expected dendrite activations: dendrite_layer.segments(context)
# This will be the shape batch_size x num_units x num_segments
expected_dendrite_activations = torch.tensor(
[
[
[-0.2184, 0.0756, -0.2856],
[0.1512, -0.1344, 0.1722],
[0.0756, 0.2546, 0.1541],
[0.0966, -0.2520, -0.1608],
],
[
[-0.0780, 0.0270, -0.1020],
[0.0540, -0.0480, 0.0615],
[0.0270, 0.1976, 0.1196],
[0.0345, -0.0900, -0.1248],
],
]
)
# Validate dendrite activations.
actual_dendrite_activations = dendrite_layer.segments(context)
self.assertTrue(
expected_dendrite_activations.allclose(actual_dendrite_activations)
)
# Validate the biasing term: max per batch per unit
biasing_dendrites = torch.tensor(
[[0.0756, 0.1722, 0.2546, 0.0966], [0.0270, 0.0615, 0.1976, 0.0345]]
)
all_matches = (
expected_dendrite_activations.max(dim=2).values == biasing_dendrites
).all()
self.assertTrue(all_matches)
# Validate output of dendritic layer.
expected_out = linear(x) + biasing_dendrites
actual_out = dendrite_layer(x, context)
self.assertTrue(expected_out.allclose(actual_out))
def test_apply_biasing_dendrites(self):
"""
Validate the apply_dendrites function of a biasing dendritic layer.
The max of the dendrite_activations should be taken per batch per unit.
"""
# Dendritic weights as a bias.
linear = torch.nn.Linear(10, 10)
dendrite_layer = BiasingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=True,
)
# pseudo output: batch_size=2, out_features=3
y = torch.tensor([[0.1, -0.1, 0.5], [0.2, 0.3, -0.2]])
# pseudo dendrite_activations: batch_size=2, num_units=3, num_segments=3
dendrite_activations = torch.tensor(
[
[[0.43, 1.64, 1.49], [-0.79, 0.53, 1.08], [0.02, 0.04, 0.57]],
[[1.79, -0.48, -0.38], [-0.15, 0.76, -1.13], [1.04, -0.58, -0.31]],
]
)
# Expected max activation per batch per unit.
max_activation = torch.tensor([[1.64, 1.08, 0.57], [1.79, 0.76, 1.04]])
# Expected output: dendrites applied as bias
expected_output = y + max_activation
actual_output = dendrite_layer.apply_dendrites(y, dendrite_activations)
all_matches = (expected_output == actual_output).all()
self.assertTrue(all_matches)
def test_sparsity(self):
"""
Ensure both the linear weights and segment weights are rezeroed properly.
"""
linear_sparsity = 70 / 100
dendrite_sparsity = 13 / 15
linear = torch.nn.Linear(10, 10)
dendrite_layer = BiasingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=linear_sparsity,
dendrite_sparsity=dendrite_sparsity,
dendrite_bias=True,
)
linear_weights = linear.weight.data
dendrite_weights = dendrite_layer.segments.weights.data
linear_weights[:] = 1
dendrite_weights[:] = 1
dendrite_layer.rezero_weights()
num_off = (dendrite_weights == 0).sum().item()
actual_dendrite_sparsity = num_off / dendrite_weights.numel()
self.assertEqual(dendrite_sparsity, actual_dendrite_sparsity)
num_off = (linear_weights == 0).sum().item()
actual_linear_sparsity = num_off / linear_weights.numel()
self.assertEqual(linear_sparsity, actual_linear_sparsity)
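# Note: the fractions above are presumably chosen so the zero counts are
# exact: 0.7 of the 10x10 linear weights gives 70 zeros, and 13/15 of each
# 15-dimensional segment row gives 13 zeros, so the measured ratios match
# the requested sparsities without rounding.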
def test_gating_forward(self):
"""Validate shape of forward output."""
# Gating dendritic weights.
linear = torch.nn.Linear(10, 10)
dendrite_layer = GatingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=True,
)
dendrite_layer.rezero_weights()
batch_size = 8
input_dim = dendrite_layer.module.weight.shape[1]
context_dim = dendrite_layer.segments.weights.shape[2]
x = torch.rand(batch_size, input_dim)
context = torch.rand(batch_size, context_dim)
out = dendrite_layer(x, context)
self.assertEqual(out.shape, (8, 10))
def test_apply_gating_dendrites(self):
"""
Validate the apply_dendrites function of a gating dendrite layer.
The max of the dendrite_activations should be taken per batch per unit.
"""
# Dendrite weights as a bias.
linear = torch.nn.Linear(10, 10)
dendrite_layer = GatingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=True,
)
# pseudo output: batch_size=2, out_features=3
y = torch.tensor([[0.73, 0.72, 0.62], [0.26, 0.24, 0.65]])
# pseudo dendrite_activations: batch_size=2, num_units=3, num_segments=2
dendrite_activations = torch.tensor(
[
[[-1.15, -0.49], [0.87, -0.58], [-0.36, -0.93]],
[[-0.08, -1.00], [-0.71, 0.08], [0.15, 0.40]],
]
)
# Expected max activation per batch per unit.
max_activation = torch.tensor([[-0.49, 0.87, -0.36], [-0.08, 0.08, 0.40]])
# Expected output: dendrites applied as gate
expected_output = y * torch.sigmoid(max_activation)
actual_output = dendrite_layer.apply_dendrites(y, dendrite_activations)
all_matches = (expected_output == actual_output).all()
self.assertTrue(all_matches)
class AbsoluteMaxGatingDendriticLayerTests(unittest.TestCase):
def test_forward_output_shape(self):
"""Validate shape of forward output."""
linear = torch.nn.Linear(10, 10)
dendrite_layer = AbsoluteMaxGatingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9
)
dendrite_layer.rezero_weights()
batch_size = 8
input_dim = dendrite_layer.module.weight.shape[1]
context_dim = dendrite_layer.segments.weights.shape[2]
x = torch.rand(batch_size, input_dim)
context = torch.rand(batch_size, context_dim)
out = dendrite_layer(x, context)
self.assertEqual(out.shape, (8, 10))
def test_segment_sparsity(self):
"""Test sparsity of dendritic segments."""
linear = torch.nn.Linear(10, 11)
dendrite_layer = AbsoluteMaxGatingDendriticLayer(
module=linear,
num_segments=10,
dim_context=100,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
params, nonzero_params = count_nonzero_params(dendrite_layer.segments)
self.assertAlmostEqual(0.1, nonzero_params / params)
self.assertEqual(1100, nonzero_params)
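# Arithmetic behind the expected count: the segments hold
# 11 units * 10 segments * 100 context dims = 11000 weights; with
# dendrite_sparsity=0.9 only 10% remain nonzero, i.e. 0.1 * 11000 == 1100.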
def test_apply_gating_dendrites(self):
"""
Validate the outputs of the absolute max gating layer against hand-computed
outputs.
"""
linear = torch.nn.Linear(10, 10)
dendrite_layer = AbsoluteMaxGatingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# pseudo output: batch_size=2, out_features=3
y = torch.tensor([[0.1, -0.1, 0.5], [0.2, 0.3, -0.2]])
# pseudo dendrite_activations: batch_size=2, num_units=3, num_segments=3
dendrite_activations = torch.tensor(
[
[[0.43, -1.64, 1.49], [-0.79, 0.53, 1.08], [0.02, 0.04, -0.57]],
[[1.79, -0.48, -0.38], [-0.15, 0.76, -1.13], [1.04, -0.58, -0.31]],
]
)
# Expected absolute max activation per batch per unit
absolute_max_activations = torch.tensor([
[-1.64, 1.08, -0.57],
[1.79, -1.13, 1.04]
])
# Expected output: dendrites applied as a (sigmoid) gate
expected_output = y * torch.sigmoid(absolute_max_activations)
actual_output = dendrite_layer.apply_dendrites(y, dendrite_activations)
all_matches = (expected_output == actual_output).all()
self.assertTrue(all_matches)
def test_gradients(self):
"""
Validate gradient values to ensure they are flowing through the absolute max
operation. Note that this test doesn't actually consider the values of
gradients, apart from whether they are zero or non-zero.
"""
linear = torch.nn.Linear(10, 10)
dendrite_layer = AbsoluteMaxGatingDendriticLayer(
module=linear,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# pseudo output: batch_size=2, out_features=3
y = torch.tensor([[0.1, -0.1, 0.5], [0.2, 0.3, -0.2]])
# pseudo dendrite_activations: batch_size=2, num_units=3, num_segments=3
dendrite_activations = torch.tensor(
[
[[0.43, -1.64, 1.49], [-0.79, 0.53, 1.08], [0.02, 0.04, -0.57]],
[[1.79, -0.48, -0.38], [-0.15, 0.76, -1.13], [1.04, -0.58, -0.31]],
], requires_grad=True
)
output = dendrite_layer.apply_dendrites(y, dendrite_activations)
output.sum().backward()
# Expected gradient mask
expected_grad_mask = torch.tensor(
[
[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
]
)
actual_grad_mask = 1.0 * (dendrite_activations.grad != 0.0)
all_matches = (expected_grad_mask == actual_grad_mask).all()
self.assertTrue(all_matches)
class GatingDendriticLayer2dTests(unittest.TestCase):
def test_forward(self):
""" Validate the output values of the output tensor returned by `forward`. """
# Initialize convolutional layer
conv_layer = torch.nn.Conv2d(
in_channels=2, out_channels=3, kernel_size=2, stride=1, bias=True
)
# Initialize dendrite layer
dendrite_layer = GatingDendriticLayer2d(
module=conv_layer,
num_segments=3,
dim_context=4,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# Set weights and biases of convolutional layer
conv_layer.weight.data[:] = torch.tensor(
[
[
[[0.0000, 0.3105], [-0.1523, 0.0000]],
[[0.0000, 0.0083], [-0.2167, 0.0483]]
],
[
[[0.1621, 0.0000], [-0.3283, 0.0101]],
[[-0.1045, 0.0261], [0.0000, 0.0000]]
],
[
[[0.0000, -0.0968], [0.0499, 0.0000]],
[[0.0850, 0.0000], [0.2646, -0.3485]]
]
], requires_grad=True
)
conv_layer.bias.data[:] = torch.tensor(
[-0.2027, -0.1821, 0.2152], requires_grad=True
)
# Dendrite weights: num_channels=3, num_segments=3, dim_context=4
dendrite_layer.segments.weights.data[:] = torch.tensor([
[
[-0.4933, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.3805, 0.0000],
[0.0000, 0.0000, 0.0000, -0.1641]
],
[
[0.0000, 0.0000, 0.0000, 0.3555],
[0.0000, 0.0000, 0.0000, 0.1892],
[0.0000, 0.0000, -0.4274, 0.0000]
],
[
[0.0000, 0.0000, 0.0000, 0.0957],
[0.0000, 0.0000, -0.0689, 0.0000],
[0.0000, 0.0000, 0.0000, -0.3192]
]
])
# Input to dendrite layer: batch_size=2, num_channels=2, width=3, height=3
x = torch.tensor([
[
[
[0.1553, 0.3405, 0.2367],
[0.7661, 0.1383, 0.6675],
[0.6464, 0.1559, 0.9777]
],
[
[0.4114, 0.6362, 0.7020],
[0.2617, 0.2275, 0.4238],
[0.6374, 0.8270, 0.7528]
]
],
[
[
[0.8331, 0.7792, 0.4369],
[0.7947, 0.2609, 0.1992],
[0.1527, 0.3006, 0.5496]
],
[
[0.6811, 0.6871, 0.0148],
[0.6084, 0.8351, 0.5382],
[0.7421, 0.8639, 0.7444]
]
]
])
# Context input to dendrite layer: batch_size=2, dim_context=4
context_vectors = torch.tensor([
[1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0]
])
# Expected max dendrite activations (pre-sigmoid):
# [[0.3805 zero zero]
# [ zero zero zero]]
# Expected output of convolutional layer:
#
# batch item 1 (each row corresponds to an output channel)
# [[[-0.2541 -0.1733] [-0.3545 -0.1585]]
# [[-0.4334 -0.2137] [-0.2900 -0.2137]]
# [[ 0.2454 0.1658] [ 0.1368 0.1342]]]
#
# batch item 2
# [[[-0.1676 -0.2616] [-0.2571 -0.3334]]
# [[-0.3586 -0.2108] [-0.1422 -0.3062]]
# [[ 0.1073 0.2777] [ 0.1446 0.2511]]]
# Overall expected output of dendrite layer:
#
# batch item 1 (each row corresponds to an output channel)
# [[[-0.1509338 -0.10293911] [-0.21057076 -0.094148]]
# [[-0.2167 -0.10685 ] [-0.145 -0.10685 ]]
# [[ 0.1227 0.0829 ] [ 0.0684 0.0671 ]]]
#
# batch item 2
# [[[-0.0838 -0.1308] [-0.1285 -0.1667]]
# [[-0.1793 -0.1054] [-0.0711 -0.1531]]
# [[ 0.0536 0.1389] [ 0.0723 0.1256]]]
expected_output = torch.tensor([
[
[[-0.1509338, -0.10293911], [-0.21057076, -0.094148]],
[[-0.2167, -0.10685], [-0.145, -0.10685]],
[[0.1227, 0.0829], [0.0684, 0.0671]]
],
[
[[-0.0838, -0.1308], [-0.1285, -0.1667]],
[[-0.1793, -0.1054], [-0.0711, -0.1531]],
[[0.0536, 0.1389], [0.0723, 0.1256]]
]
])
actual_output = dendrite_layer(x, context_vectors)
self.assertTrue(torch.allclose(expected_output, actual_output, atol=1e-4))
def test_apply_gating_dendrites(self):
conv_layer = torch.nn.Conv2d(
in_channels=1, out_channels=3, kernel_size=3, stride=1, bias=True
)
dendrite_layer = GatingDendriticLayer2d(
module=conv_layer,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# pseudo output: batch_size=2, num_channels=3, height=2, width=2
y = torch.tensor([
[
[[0.3, 0.4], [-0.2, 0.1]],
[[-0.3, 0.5], [-0.1, 0.1]],
[[0.0, 0.1], [0.3, 0.2]]
],
[
[[0.1, -0.2], [-0.2, 0.1]],
[[0.0, 0.1], [-0.4, -0.1]],
[[-0.3, 0.0], [0.2, 0.4]]
],
])
# pseudo dendrite_activations: batch_size=2, num_channels=3, num_segments=3
dendrite_activations = torch.tensor(
[
[[0.4, 0.9, -0.1], [-0.8, 0.7, 0.0], [0.6, -0.6, -0.7]],
[[0.2, 0.8, 0.8], [-0.1, -0.4, -0.5], [0.0, 0.0, 0.0]],
]
)
# Expected max dendrite activations:
# [[0.9 0.7 0.6]
# [0.8 -0.1 0.0]]
# Expected output based on `dendrite_activations`
expected_output = torch.tensor([
[
[[0.2133, 0.2844], [-0.1422, 0.0711]],
[[-0.2005, 0.3341], [-0.0668, 0.0668]],
[[0.0, 0.0646], [0.1937, 0.1291]]
],
[
[[0.0690, -0.1380], [-0.1380, 0.0690]],
[[0.0, 0.0475], [-0.1900, -0.0475]],
[[-0.15, 0.0], [0.1, 0.2]]
],
])
actual_output = dendrite_layer.apply_dendrites(y, dendrite_activations)
all_matches = torch.allclose(expected_output, actual_output, atol=1e-4)
self.assertTrue(all_matches)
def test_gradients(self):
"""
Ensure dendrite gradients are flowing through the layer
`GatingDendriticLayer2d`. Note that this test doesn't actually consider the
values of gradients, apart from whether they are zero or non-zero.
"""
conv_layer = torch.nn.Conv2d(
in_channels=2, out_channels=3, kernel_size=2, stride=1, bias=True
)
dendrite_layer = GatingDendriticLayer2d(
module=conv_layer,
num_segments=3,
dim_context=4,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# Dendrite weights: num_channels=3, num_segments=3, dim_context=4
dendrite_layer.segments.weights.data[:] = torch.tensor([
[
[-0.4933, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.3805, 0.0000],
[0.0000, 0.0000, 0.0000, -0.1641]
],
[
[0.0000, 0.0000, 0.0000, 0.3555],
[0.0000, 0.0000, 0.0000, 0.1892],
[0.0000, 0.0000, -0.4274, 0.0000]
],
[
[0.0000, 0.0000, 0.0000, 0.0957],
[0.0000, 0.0000, -0.0689, 0.0000],
[0.0000, 0.0000, 0.0000, -0.3192]
]
])
# Input to dendrite layer: batch_size=1, num_channels=2, width=3, height=3
x = torch.randn((1, 2, 3, 3))
# Context input to dendrite layer: batch_size=1, dim_context=4
context_vectors = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
# Expected dendrite activations:
#
# batch item 1 (each row corresponds to an output channel)
# [[-0.4933 0.3805 zero]
# [ zero zero -0.4274]
# [ zero -0.0689 zero]]
# Expected dendrite gradient mask
#
# batch item 1
# [[0 1 0]
# [1 0 0]
# [1 0 0]]
output = dendrite_layer(x, context_vectors)
output.sum().backward()
# Expected gradient mask
expected_grad_mask = torch.tensor([
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
])
actual_grad_mask = 1.0 * (dendrite_layer.segments.weights.grad != 0.0)
all_matches = (expected_grad_mask == actual_grad_mask).all()
self.assertTrue(all_matches)
class AbsoluteMaxGatingDendriticLayer2dTests(unittest.TestCase):
def test_forward(self):
""" Validate the output values of the output tensor returned by `forward`. """
# Initialize convolutional layer
conv_layer = torch.nn.Conv2d(
in_channels=2, out_channels=3, kernel_size=2, stride=1, bias=True
)
# Initialize dendrite layer
dendrite_layer = AbsoluteMaxGatingDendriticLayer2d(
module=conv_layer,
num_segments=3,
dim_context=4,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# Set weights and biases of convolutional layer
conv_layer.weight.data[:] = torch.tensor(
[
[
[[0.0000, 0.3105], [-0.1523, 0.0000]],
[[0.0000, 0.0083], [-0.2167, 0.0483]]
],
[
[[0.1621, 0.0000], [-0.3283, 0.0101]],
[[-0.1045, 0.0261], [0.0000, 0.0000]]
],
[
[[0.0000, -0.0968], [0.0499, 0.0000]],
[[0.0850, 0.0000], [0.2646, -0.3485]]
]
], requires_grad=True
)
conv_layer.bias.data[:] = torch.tensor(
[-0.2027, -0.1821, 0.2152], requires_grad=True
)
# Dendrite weights: num_channels=3, num_segments=3, dim_context=4
dendrite_layer.segments.weights.data[:] = torch.tensor([
[
[-0.4933, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.3805, 0.0000],
[0.0000, 0.0000, 0.0000, -0.1641]
],
[
[0.0000, 0.0000, 0.0000, 0.3555],
[0.0000, 0.0000, 0.0000, 0.1892],
[0.0000, 0.0000, -0.4274, 0.0000]
],
[
[0.0000, 0.0000, 0.0000, 0.0957],
[0.0000, 0.0000, -0.0689, 0.0000],
[0.0000, 0.0000, 0.0000, -0.3192]
]
])
# Input to dendrite layer: batch_size=2, num_channels=2, width=3, height=3
x = torch.tensor([
[
[
[0.1553, 0.3405, 0.2367],
[0.7661, 0.1383, 0.6675],
[0.6464, 0.1559, 0.9777]
],
[
[0.4114, 0.6362, 0.7020],
[0.2617, 0.2275, 0.4238],
[0.6374, 0.8270, 0.7528]
]
],
[
[
[0.8331, 0.7792, 0.4369],
[0.7947, 0.2609, 0.1992],
[0.1527, 0.3006, 0.5496]
],
[
[0.6811, 0.6871, 0.0148],
[0.6084, 0.8351, 0.5382],
[0.7421, 0.8639, 0.7444]
]
]
])
# Context input to dendrite layer: batch_size=2, dim_context=4
context_vectors = torch.tensor([
[1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0]
])
# Expected absolute max dendrite activations (pre-sigmoid):
# [[-0.4933 -0.4274 -0.0689]
# [ zero zero zero]]
# Expected output of convolutional layer:
#
# batch item 1 (each row corresponds to an output channel)
# [[[-0.2541 -0.1733] [-0.3545 -0.1585]]
# [[-0.4334 -0.2137] [-0.2900 -0.2137]]
# [[ 0.2454 0.1658] [ 0.1368 0.1342]]]
#
# batch item 2
# [[[-0.1676 -0.2616] [-0.2571 -0.3334]]
# [[-0.3586 -0.2108] [-0.1422 -0.3062]]
# [[ 0.1073 0.2777] [ 0.1446 0.2511]]]
# Overall expected output of dendrite layer:
#
# batch item 1 (each row corresponds to an output channel)
# [[[-0.0963335 -0.06570089] [-0.13439679 -0.06008996]]
# [[-0.17108351 -0.08435751] [-0.11447673 -0.08435751]]
# [[ 0.11847466 0.08004522] [ 0.06604455 0.06478932]]]
#
# batch item 2
# [[[-0.0838 -0.1308] [-0.1285 -0.1667]]
# [[-0.1793 -0.1054] [-0.0711 -0.1531]]
# [[ 0.0536 0.1389] [ 0.0723 0.1256]]]
expected_output = torch.tensor([
[
[[-0.0963335, -0.06570089], [-0.13439679, -0.06008996]],
[[-0.17108351, -0.08435751], [-0.11447673, -0.08435751]],
[[0.11847466, 0.08004522], [0.06604455, 0.06478932]]
],
[
[[-0.0838, -0.1308], [-0.1285, -0.1667]],
[[-0.1793, -0.1054], [-0.0711, -0.1531]],
[[0.0536, 0.1389], [0.0723, 0.1256]]
]
])
actual_output = dendrite_layer(x, context_vectors)
self.assertTrue(torch.allclose(expected_output, actual_output, atol=1e-4))
def test_apply_gating_dendrites(self):
conv_layer = torch.nn.Conv2d(
in_channels=1, out_channels=3, kernel_size=3, stride=1, bias=True
)
dendrite_layer = AbsoluteMaxGatingDendriticLayer2d(
module=conv_layer,
num_segments=20,
dim_context=15,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# pseudo output: batch_size=2, num_channels=3, height=2, width=2
y = torch.tensor([
[
[[0.3, 0.4], [-0.2, 0.1]],
[[-0.3, 0.5], [-0.1, 0.1]],
[[0.0, 0.1], [0.3, 0.2]]
],
[
[[0.1, -0.2], [-0.2, 0.1]],
[[0.0, 0.1], [-0.4, -0.1]],
[[-0.3, 0.0], [0.2, 0.4]]
],
])
# pseudo dendrite_activations: batch_size=2, num_channels=3, num_segments=3
dendrite_activations = torch.tensor(
[
[[0.4, 0.9, -0.1], [-0.8, 0.7, 0.0], [0.6, -0.6, -0.7]],
[[0.2, 0.8, 0.8], [-0.1, -0.4, 0.5], [0.0, 0.0, 0.0]],
]
)
# Expected absolute max dendrite activations (pre-sigmoid):
# [[0.9 -0.8 -0.7]
# [0.8 0.5 0.0]]
# Expected output based on `dendrite_activations`
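        # (each entry of `y` scaled by the sigmoid of its channel's absolute-max
        # activation, e.g. 0.3 * sigmoid(0.9) ~= 0.3 * 0.7109 ~= 0.2133)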
expected_output = torch.tensor([
[
[[0.2133, 0.2844], [-0.1422, 0.0711]],
[[-0.093, 0.155], [-0.031, 0.031]],
[[0.0, 0.0332], [0.0995, 0.0664]]
],
[
[[0.069, -0.138], [-0.138, 0.069]],
[[0.0, 0.0622], [-0.249, -0.0622]],
[[-0.15, 0.0], [0.1, 0.2]]
],
])
actual_output = dendrite_layer.apply_dendrites(y, dendrite_activations)
all_matches = torch.allclose(expected_output, actual_output, atol=1e-4)
self.assertTrue(all_matches)
def test_gradients(self):
"""
Ensure dendrite gradients are flowing through the layer
`AbsoluteMaxGatingDendriticLayer2d`. Note that this test doesn't actually
consider the values of gradients, apart from whether they are zero or non-zero.
"""
conv_layer = torch.nn.Conv2d(
in_channels=2, out_channels=3, kernel_size=2, stride=1, bias=True
)
dendrite_layer = AbsoluteMaxGatingDendriticLayer2d(
module=conv_layer,
num_segments=3,
dim_context=4,
module_sparsity=0.7,
dendrite_sparsity=0.9,
dendrite_bias=False,
)
# Dendrite weights: num_channels=3, num_segments=3, dim_context=4
dendrite_layer.segments.weights.data[:] = torch.tensor([
[
[-0.4933, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.3805, 0.0000],
[0.0000, 0.0000, 0.0000, -0.1641]
],
[
[0.0000, 0.0000, 0.0000, 0.3555],
[0.0000, 0.0000, 0.0000, 0.1892],
[0.0000, 0.0000, -0.4274, 0.0000]
],
[
[0.0000, 0.0000, 0.0000, 0.0957],
[0.0000, 0.0000, -0.0689, 0.0000],
[0.0000, 0.0000, 0.0000, -0.3192]
]
])
# Input to dendrite layer: batch_size=1, num_channels=2, width=3, height=3
x = torch.randn((1, 2, 3, 3))
# Context input to dendrite layer: batch_size=1, dim_context=4
context_vectors = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
# Expected dendrite activations:
#
# batch item 1 (each row corresponds to an output channel)
# [[-0.4933 0.3805 zero]
# [ zero zero -0.4274]
# [ zero -0.0689 zero]]
# Expected dendrite gradient mask
#
# batch item 1
# [[1 0 0]
# [0 0 1]
# [0 1 0]]
output = dendrite_layer(x, context_vectors)
output.sum().backward()
# Expected gradient mask
expected_grad_mask = torch.tensor([
[[1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
])
actual_grad_mask = 1.0 * (dendrite_layer.segments.weights.grad != 0.0)
all_matches = (expected_grad_mask == actual_grad_mask).all()
self.assertTrue(all_matches)
if __name__ == "__main__":
unittest.main(verbosity=2)
| agpl-3.0 |
KenLauLab/pCreode | pcreode/pcreode.py | 1 | 23408 | # p-Creode algorithm maps cell developmental trajectories
# Copyright (C) 2017 Charles A Herring
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import matplotlib.pyplot as _plt
import pandas as _pd
import igraph as _igraph
import numpy as np
from sklearn.decomposition import PCA as _PCA
from sklearn.metrics import pairwise_distances as _pairwise_distances
from sklearn import preprocessing as _preprocessing
import os as _os
from igraph import *
from .functions import *
import matplotlib
from IPython.display import display, Image
#################################################
class PCA( object):
def __init__( self, data):
"""
Container class for single cell data
:param data: DataFrame of cells with X proteins representing expression
"""
if not ( isinstance( data, _pd.DataFrame)):
raise TypeError( 'data must be of pandas DataFrame')
self._data = data.values
self._protein_list = data.columns
self._cell_count = data.shape[0]
self._protein_count = data.shape[1]
#print( 'Cell count = {0}, Gene/protein count = {1}'.format( data.shape[0],data.shape[1]))
def get_pca( self): #gets a PCA of the data the object was initialized with
"""
Principal component analysis of data
"""
pca = _PCA()
self.pca = pca.fit_transform( self._data)
self.pca_explained_var = pca.explained_variance_ratio_ * 100 #output a percent of the variance explained within the object defined here
return
def pca_plot_explained_var( self, figsize=(6,6), xlim=(0,25)):
"""
Plot the variance explained by different principal components
:param figsize: size of plot to return
"""
if self.pca_explained_var is None:
raise RuntimeError('Please run get_pca() before plotting')
fig = _plt.figure( figsize=figsize)
ax = fig.add_subplot(111)
ax.set_xlabel( 'PC#')
ax.set_ylabel( 'Explained Var')
ax.set_xlim( xlim)
ax.plot( range( len( self.pca_explained_var)), self.pca_explained_var, '-o') #
return
def pca_set_components( self, n_components):
"""
Set principal component analysis to desired set of components
:param n_components: Number of components to keep for further analysis
:return: All data points with selected PCA components
"""
if self.pca_explained_var is None:
raise RuntimeError('Please run get_pca() before selecting components')
return( self.pca[:,:n_components])
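# Usage sketch (illustrative only; the file name and number of components
# below are hypothetical):
#
#   import pandas as pd
#   raw = pd.read_csv("expression_matrix.csv")   # cells x genes/proteins
#   pca = PCA(raw)
#   pca.get_pca()
#   pca.pca_plot_explained_var()
#   reduced = pca.pca_set_components(5)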
#################################################
class Density( object):
def __init__( self, preprocessed_data, metric='euclidean'):
"""
Container class for generating density file used to downsample data
:param preprocessed_data: numpy array of preprocessed data
        :param metric: distance metric used by sklearn
"""
if not ( isinstance( preprocessed_data, np.ndarray)):
raise TypeError( 'preprocessed_data must be numpy array')
if ( ~np.in1d( metric, ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'])):
raise TypeError( 'metric must be one of the following cityblock, cosine, euclidean, l1, l2, or manhattan')
self._data = preprocessed_data
self._n_pts = preprocessed_data.shape[0]
self._n_components = preprocessed_data.shape[1]
self._metric = metric
def nearest_neighbor_hist( self, n_rand_pts=5000, n_bins=200, figsize=(8,6), metric='euclidean', mute=False):
"""
Plots a histogram of distance to nearest neighbor for select number of random points
and returns a best guess for the radius used for density calculations
:param n_rand_pts: Number of random pts to use to generate histogram
        :param n_bins: Number of bins used to generate histogram
:param figsize: size of plot to return
:param mute: boolean operator to suppress print statements
        :return: Histogram of distances to nearest neighbors
"""
# Save sys.stdout to return print output if muted
old_stdout = sys.stdout
# Mute print statements if True
if( mute==True):
sys.stdout = open( os.devnull, 'w')
if ( n_rand_pts>self._n_pts):
n_rand_pts=self._n_pts
r_inds = np.random.choice( range( self._n_pts), size=n_rand_pts)
dists = _pairwise_distances( self._data[r_inds,:], self._data, metric=self._metric)
dists_sort = np.sort( dists, axis=1)
# plotting configurations
fig = _plt.figure( figsize=figsize)
ax = fig.add_subplot(111)
ax.set_xlabel( 'Distance to Nearest Neighbor')
ax.set_ylabel( 'Number of Datapoints')
ax.hist( dists_sort[:,1], bins=n_bins)
# plot line for best guess starting radius in downsampling
best_guess = np.median( np.sort( dists_sort[:,1])[-20:])
ax.axvline( best_guess, color='r')
print( "best guess starting radius = {}".format( best_guess))
# return to normal treatment of print statements
sys.stdout = old_stdout
return( best_guess)
def radius_best_guess( self, n_rand_pts=5000, metric='euclidean'):
"""
Returns a best guess for the radius based on a select number of random points
:param n_rand_pts: Number of random pts to use to generate histogram
:return: float numeric for best guess of radius
"""
if ( n_rand_pts>self._n_pts):
n_rand_pts=self._n_pts
r_inds = np.random.choice( range( self._n_pts), size=n_rand_pts)
dists = _pairwise_distances( self._data[r_inds,:], self._data, metric=self._metric)
dists_sort = np.sort( dists, axis=1)
        # best guess for the radius: median of the 20 largest nearest-neighbor distances
best_guess = np.median( np.sort( dists_sort[:,1])[-20:])
return( best_guess)
def get_density( self, radius, chunk_size=5000, mute=False):
"""
Calculates the density of each datapoint
:param radius: Radius around each datapoints used for density calculations
:param chunk_size: Number of cells to consider during each iteration due to memory restrictions
:param mute: boolean operator to suppress print statements
:return: Calculated densities for all datapoints
"""
# Save sys.stdout to return print output if muted
old_stdout = sys.stdout
# Mute print statements if True
if( mute==True):
sys.stdout = open( os.devnull, 'w')
        # Due to memory restrictions density assignments have to be performed in chunks
all_chunks = get_chunks( range( self._n_pts), chunk_size)
# create array to hold all densities
density = np.empty((self._n_pts), dtype=int)
# create a nested array of indices for each cell within rad
neighbors = np.empty((self._n_pts), dtype=object)
for chunk in all_chunks:
chunk_dist = _pairwise_distances( self._data[chunk,:], self._data, n_jobs=1, metric=self._metric)
print( "calculating densities for datapoints: {0} -> {1}".format( chunk[0], chunk[-1]))
for chunk_ind, ind in enumerate( chunk):
neighbors[ind] = np.setdiff1d( np.ravel( np.argwhere( chunk_dist[chunk_ind]<=radius).ravel()), ind)
density[ind] = len( neighbors[ind])
print( "****Always check density overlay for radius fit****")
self.density = density
self.neighbors = neighbors
# return to normal treatment of print statements
sys.stdout = old_stdout
return( density)
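    # Usage sketch (illustrative only; `reduced` stands for any preprocessed
    # numpy array, e.g. the output of PCA.pca_set_components above):
    #
    #   dens = Density(reduced)
    #   radius = dens.radius_best_guess()
    #   densities = dens.get_density(radius)
    #   dens.density_hist()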
def density_hist( self, n_bins=200, figsize=(8,6)):
"""
Plots a histogram of datapoints' density
        :param n_bins: Number of bins used to generate histogram
:param figsize: size of plot to return
        :return: Histogram of densities
"""
if self.density is None:
raise RuntimeError('Please run get_density() before plotting')
# plotting configurations
fig = _plt.figure( figsize=figsize)
ax = fig.add_subplot(111)
ax.set_xlabel( 'Density')
ax.set_ylabel( 'Number of Datapoints')
ax.hist( self.density, bins=n_bins)
return
#################################################
class Analysis( object):
def __init__( self, file_path, graph_id, data, density, noise, metric='euclidean'):
"""
Container class for analyzing pCreode results
:param file_path: path to directory where graph files are stored
:param graph_id: graph ID to plot in given directory
:param data: data used to produce pCreode graphs
:param density: data point densities used to create p-Creode graph
        :param metric: distance metric used by sklearn, in this case to average the node values
"""
if not ( _os.path.exists( file_path)):
raise TypeError( 'please supply a valid file path directory')
if ( ~np.in1d( metric, ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'])):
raise TypeError( 'metric must be one of the following cityblock, cosine, euclidean, l1, l2, or manhattan')
self._file_path = file_path
self._graph_id = graph_id
self._data = data
self._density = density
self._noise = noise
        # cells whose density is above the noise threshold
self.good_cells = data[density>noise]
self.good_cells_inds = np.arange( len( data))[density>noise]
# node_data_indices are the node indices from the data set used to run pCreode
self.node_data_indices = np.genfromtxt( file_path + 'ind_{}.csv'.format( self._graph_id), delimiter=',').astype( int)
self.num_nodes = len( self.node_data_indices)
# node_graph_indices refers to the node indices within the graph
self.node_graph_indices = np.arange( self.num_nodes)
# read adjacency matrix for graph
adj = pd.read_csv( self._file_path + 'adj_{}.txt'.format(self._graph_id), sep=" ", header=None)
# get distance between nodes in graph
self.dist = pairwise_distances( self._data[self.node_data_indices,:], self._data[self.node_data_indices,:], n_jobs=1, metric='l2')
        # calculate weighted adjacency matrix
w_adj = np.multiply( adj.values, self.dist)
# create graph to plots
self.graph = _igraph.Graph.Weighted_Adjacency( w_adj.tolist(), mode=ADJ_UNDIRECTED)
self.graph.vs["label"] = range( 0, self.num_nodes+1)
def plot_save_graph( self, seed, overlay, file_out, upper_range=3, node_label_size=0):
"""
Plots a p-Creode graph with given overlay
        :param seed: random integer to be used to seed graph plot
:param overlay: characteristic to overlay on graph, likely from preprocessed data set
:param file_out: name to give saved graph in file_path provided
:param upper_range: upper range from which to normalize overlay to, this will vary with analyte
:param node_label_size: size of node labels, when set to zero (default) no label will be plotted
:return: A plot of selected p-Creode graph with given overlay and saves a png in file_path with file_out name
"""
# normalize densities to use for nodes sizes in graph plot
norm_dens = preprocessing.MinMaxScaler( feature_range=(8,30))
dens = norm_dens.fit_transform( self._density.astype( float).reshape(-1, 1))[self.node_data_indices]
# normalize overlay to use for node overlays
norm_ana = preprocessing.MinMaxScaler( feature_range=(0, upper_range))
norm_ana.fit( overlay[self._density>self._noise].values.astype( np.float).reshape(-1, 1))
old_ana = norm_ana.transform( overlay[self._density>self._noise].values.astype( np.float).reshape(-1, 1))
# bin the data points to each node so that an average of closest surrounding nodes is used for overlay
bin_dist = pairwise_distances( self.good_cells, self._data[self.node_data_indices])
bin_assignments = np.argmin( bin_dist, axis=1)
new_ana = overlay.values[self.node_data_indices]
for ii in range( self.num_nodes):
new_ana[ii] = np.mean( old_ana[bin_assignments==ii])
norm_1 = np.array( new_ana, dtype=float)
cl_vals_1 = [[]]*self.num_nodes
# colors to use for overlay
get_cl = _plt.get_cmap('RdYlBu_r')
for jj in range( self.num_nodes):
cl_vals_1[jj] = get_cl( norm_1[jj])
self.graph.vs["color"] = [cl_vals_1[kk] for kk in range( self.num_nodes)]
random.seed( seed)
layout = self.graph.layout_kamada_kawai( maxiter=2000)
plot( self.graph, self._file_path + '{0}.png'.format(file_out), layout=layout, bbox=(1200,1200),
vertex_size=dens, edge_width=2, vertex_label_size=node_label_size)
display( Image( filename=self._file_path + file_out + '.png', embed=True, unconfined=True, width=600,height=600))
return
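    # Usage sketch (illustrative only; the output directory, graph id, noise
    # level and overlay column are hypothetical):
    #
    #   ana = Analysis("pcreode_output/", graph_id=0, data=reduced,
    #                  density=densities, noise=2)
    #   ana.plot_save_graph(seed=1, overlay=raw["CD44"], file_out="cd44_overlay")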
def get_single_trajectory_indices( self, start_id, stop_id):
"""
Returns the node indices from the data set used to create the graph when supplied with graph indices from the plot_save_graph
        :param start_id: graph index of the start node, as labeled by the plot_save_graph function
        :param stop_id: graph index of the stop node
:return: list of indices from data set used to create the graph
"""
path_ids = np.ravel( self.graph.get_shortest_paths( start_id, stop_id)[0])
return( self.node_data_indices[path_ids])
def plot_analyte_dynamics( self, overlay, root_id):
"""
Returns bar plots of analyte dynamics over all trajectories when supplied with a root node
:param overlay: characteristic to overlay on graph, likely from preprocessed data set
:param root_id: graph node index for a root node (graph index is not the same as the data indexing)
:return: bar plot of analyte dynamics for each trajectory, starting from a common root node
"""
# get all end-state nodes, including root if also an end-state node (degree==1)
all_end_id = self.node_graph_indices[np.transpose( self.graph.degree())==1]
        # get end-states that aren't the root node
end_ids = all_end_id[all_end_id!=root_id]
# return all trajectories from root node to all other end-states
traj = np.ravel( self.graph.get_shortest_paths( root_id, end_ids))
old_ana = overlay[self._density>self._noise].values.astype( np.float)
# bin the data points to each node so that an average of closest surrounding nodes is used for overlay
bin_dist = pairwise_distances( self.good_cells, self._data[self.node_data_indices])
bin_assignments = np.argmin( bin_dist, axis=1)
new_ana = overlay.values[self.node_data_indices]
new_std = np.zeros_like( new_ana)
for ii in range( self.num_nodes):
new_ana[ii] = np.mean( old_ana[bin_assignments==ii])
# get standard error for error bars
new_std[ii] = np.std( old_ana[bin_assignments==ii]) / float( np.sqrt( len(old_ana[bin_assignments==ii])))
# get branch points
branch_id = self.node_graph_indices[np.transpose( self.graph.degree())>=3]
num_traj = len( traj)
# get longest length of trajectories to set x-axis scale
xlim = max([len(a) for a in traj])
_plt.figure(1, figsize=(12,num_traj*4.0))
for ii in range( num_traj):
_plt.subplot(num_traj,1,ii+1)
_plt.bar( range( len( new_ana[traj[ii]])), new_ana[traj[ii]], width=1.0, color='green', yerr=new_std[traj[ii]])
_plt.ylim(0,max(new_ana)+max(new_std))
_plt.xlim(0,xlim)
# plot where trajectory ends
_plt.axvline( x=len( new_ana[traj[ii]]), color='black', linewidth=2.5, linestyle='--')
# plot branch points if they exist, likely always will
br_ck = np.in1d( branch_id, traj[ii])
if( any( br_ck)):
vl = branch_id[br_ck]
for kk in vl:
vl2 = np.argwhere( traj[ii]==kk)
_plt.axvline( x=vl2, color='red', linewidth=2.5, linestyle='--')
_plt.title('Trajectory {}'.format(ii+1), fontsize=14, loc='right')
_plt.xlabel('p-Creode Trajectory', fontsize=12)
_plt.ylabel('Expression', fontsize=12)
return
def get_complete_analyte_gene_trajectories( self, overlay_data, root_id, file_out):
"""
Returns csv files for dynamics of all analytes for each trajectory when supplied with a root node
:param overlay_data: data set containing all analytes, must be a pandas dataframe
:param root_id: graph node index for a root node (graph index is not the same as the data indexing)
:param file_out: name to give saved graph in file_path provided
:return: csv files for analyte dynamics stored in file_path with graph files, as well as a file for cluster IDs
"""
if not ( isinstance( overlay_data, _pd.DataFrame)):
raise TypeError( 'data must be of pandas DataFrame')
# get all end-state nodes, including root if also an end-state node (degree==1)
all_end_id = self.node_graph_indices[np.transpose( self.graph.degree())==1]
        # get end-states that aren't the root node
end_ids = all_end_id[all_end_id!=root_id]
# return all trajectories from root node to all other end-states
traj = np.ravel( self.graph.get_shortest_paths( root_id, end_ids))
        # lazy workaround for when ravel is not needed with non-branching trajectories
if( len( end_ids)==1):
traj = [traj]
num_traj = len( end_ids)
num_ana = overlay_data.shape[1]
old_ana = overlay_data.values[self._density>self._noise]
# bin the data points to each node so that an average of closest surrounding nodes is used for overlay
bin_dist = pairwise_distances( self.good_cells, self._data[self.node_data_indices])
bin_assignments = np.argmin( bin_dist, axis=1)
new_ana = old_ana
#print( len( bin_assignments), len( self.good_cells_inds))
np.savetxt( self._file_path + "{}_clust_ids.csv".format( file_out), np.vstack( (self.good_cells_inds, bin_assignments)), delimiter=',')
for hh in range( num_ana):
for ii in range( self.num_nodes):
itr_ana = old_ana[bin_assignments==ii,hh]
# if no cells are binned to that node
if( itr_ana.size==0):
continue
new_ana[ii,hh] = np.mean( itr_ana)
for cc in range( num_traj):
traj_ana = pd.DataFrame( new_ana[traj[cc]].T, index=overlay_data.columns, columns=traj[cc])
traj_ana.to_csv( self._file_path + '{0}_traj{1}_analytes.csv'.format( file_out, cc+1))
return
def plot_save_qual_graph( self, seed, overlay, file_out):
"""
Plots a p-Creode graph with given overlay
        :param seed: random integer to be used to seed graph plot
:param overlay: numpy string of qualitative characteristic to overlay on graph
:param file_out: name to give saved graph in file_path provided
:return: A plot of selected p-Creode graph with qualitative overlay and saves a png in file_path with file_out name
"""
if not ( isinstance( overlay, np.ndarray)):
raise TypeError( 'overlay variable must be numpy array')
if not ( overlay.dtype.char == 'U'):
raise TypeError( 'All elements in overlay variable must be in a string dtype')
# get list of colors to be used for labeling
colors = np.array( [])
cl_names = np.array( [])
for name, hex in matplotlib.colors.cnames.items(): #items instead of iteritems
colors = np.append( colors, hex)
cl_names = np.append( cl_names, name)
# normalize densities to use for nodes sizes in graph plot
norm_dens = preprocessing.MinMaxScaler( feature_range=(8,30))
dens = norm_dens.fit_transform( self._density.astype( float).reshape(-1, 1))[self.node_data_indices]
# bin the data points to each node so that an average of closest surrounding nodes is used for overlay
bin_dist = pairwise_distances( self.good_cells, self._data[self.node_data_indices])
bin_assignments = np.argmin( bin_dist, axis=1)
new_ana = overlay[self.node_data_indices]
for ii in range( self.num_nodes):
u_over = np.unique( overlay[np.where( bin_assignments==ii)])
uniqs = np.unique( overlay[np.where( bin_assignments==ii)], return_counts=True)[1]
            # skip nodes with no cells assigned to them
if( uniqs.size==0):
continue
new_ana[ii] = u_over[np.argmax( uniqs)]
ids_ana = np.zeros(self.num_nodes, dtype=int)
zz = 0
for ii in np.unique( overlay):
ids_ana[new_ana==ii] = zz
zz = zz + 1
self.graph.vs["color"] = [colors[kk] for kk in ids_ana]
random.seed( seed)
layout = self.graph.layout_kamada_kawai( maxiter=2000)
plot( self.graph, self._file_path + '{0}.png'.format(file_out), layout=layout, bbox=(1200,1200),
vertex_size=dens, edge_width=2, vertex_label_size=0)
display( Image( filename=self._file_path + file_out + '.png', embed=True, unconfined=True, width=600,height=600))
x = np.linspace( 0, 100, len( np.unique( overlay)))
y = [0]*len( x)
label = np.unique( overlay)
cls = cl_names[:len(x)]
fig, ax = _plt.subplots( 1, figsize=(15,2))
ax.scatter(x, y, s=1000, c=cls, label=label)
for i, txt in enumerate( label):
ax.annotate(txt, (x[i]-1.0,y[i]+0.075))
_plt.axis( 'off')
_plt.show()
return
| gpl-2.0 |
MicrosoftGenomics/PySnpTools | pysnptools/standardizer/beta.py | 1 | 2775 | import numpy as np
import scipy as sp
import logging
import warnings
from pysnptools.standardizer import Standardizer
class Beta(Standardizer):
'''
A :class:`.Standardizer` to beta standardize SNP data.
See :class:`.Standardizer` for more information about standardization.
**Constructor:**
:Parameters: * **a** (*float*) -- The *a* parameter of the beta distribution
* **b** (*float*) -- The *b* parameter of the beta distribution
>>> from pysnptools.standardizer import Beta
>>> from pysnptools.snpreader import Bed
>>> snpdata1 = Bed('../../tests/datasets/all_chr.maf0.001.N300',count_A1=False).read().standardize(Beta(1,25))
>>> print('{0:.6f}'.format(snpdata1.val[0,0]))
0.680802
'''
def __init__(self,a,b):
super(Beta, self).__init__()
self.a = a
self.b = b
def __repr__(self):
return "{0}(a={1},b={2})".format(self.__class__.__name__,self.a,self.b)
def standardize(self, snpdata, block_size=None, return_trained=False, force_python_only=False): #!!!later why is the 2nd argument called 'snpdata' here, but 'snps' in unit.py?
if block_size is not None:
warnings.warn("block_size is deprecated (and not needed, since standardization is in-place", DeprecationWarning)
if hasattr(snpdata,"val"):
val = snpdata.val
else:
warnings.warn("standardizing an nparray instead of a SnpData is deprecated", DeprecationWarning)
val = snpdata
stats = self._standardize_unit_and_beta(val, is_beta=True, a=self.a, b=self.b, apply_in_place=True, use_stats=False,stats=None,force_python_only=force_python_only)
if return_trained:
from pysnptools.standardizer import BetaTrained
assert hasattr(snpdata,"val"), "return_trained=True must be used with SnpData"
return snpdata, BetaTrained(self.a,self.b,snpdata.sid,stats)
else:
return snpdata
def _merge_trained(self, trained_list):
from pysnptools.standardizer import BetaTrained
sid = np.concatenate([trained.sid for trained in trained_list])
stats = np.concatenate([trained.stats for trained in trained_list])
a_set = set([trained.a for trained in trained_list])
b_set = set([trained.b for trained in trained_list])
assert len(a_set) <= 1,"Expect all BetaTrained's to have the same 'a'"
assert len(b_set) <= 1,"Expect all BetaTrained's to have the same 'b'"
a = list(a_set)[0] if a_set else None
b = list(b_set)[0] if b_set else None
return BetaTrained(a, b, sid, stats)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
| apache-2.0 |
nelson-liu/scikit-learn | examples/linear_model/plot_theilsen.py | 98 | 3846 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
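# Illustrative aside: for 1-D data the classic Theil-Sen slope is the median of
# the slopes over all point pairs, which is why a minority of corrupted samples
# barely moves it.
ii, jj = np.triu_indices(n_samples, k=1)
pairwise_slopes = (y[jj] - y[ii]) / (x[jj] - x[ii])
print("Median of pairwise slopes: %.2f (true slope is 3)" % np.median(pairwise_slopes))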
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause |
mavrix93/LightCurvesClassifier | lcc/stars_processing/tools/visualization.py | 1 | 15010 | import numpy as np
import os
import warnings
from sklearn import decomposition
import pandas as pd
from lcc.utils.helpers import check_depth
from matplotlib import pyplot as plt
def plotProbabSpace(star_filter, plot_ranges=None, opt="show",
path=".", file_name="params_space.png", N=400,
title="Params space", x_lab="", y_lab="",
searched_coords=None, contamination_coords=None, OVERLAY=0.6):
"""
Plot params space
Parameters
----------
star_filter : StarsFilter object
Trained stars filter object
plot_ranges : tuple, list
List of ranges. For example: [(1,10), (20,50)] - for 2D plot
opt : str
Option whether save/show/return
title : str
Title of the plot
path : str
Path to the output file location
file_name : str
Name of the file
OVERLAY : float
        Fractional margin added to the plot borders beyond the data ranges
Returns
-------
None
"""
contamination_coords = contamination_coords if contamination_coords is not None else []
searched_coords = searched_coords if searched_coords is not None else []
if not len(searched_coords) and not len(contamination_coords) and hasattr(star_filter,
"searched_coords") and hasattr(
star_filter, "others_coords"):
searched_coords = star_filter.searched_coords.values
contamination_coords = star_filter.others_coords.values
dim = len(star_filter.searched_coords.columns)
if isinstance(searched_coords, pd.DataFrame) and isinstance(contamination_coords, pd.DataFrame):
searched_coords = searched_coords.values.tolist()
contamination_coords = contamination_coords.values.tolist()
if not plot_ranges:
plot_ranges = []
trained_coo = np.array(
star_filter.searched_coords.values.tolist() + star_filter.others_coords.values.tolist()).T
for i in range(dim):
rang = [np.min(trained_coo[i]), np.max(trained_coo[i])]
overl = abs(rang[0] - rang[1]) * OVERLAY
plot_ranges.append([rang[0] - overl, rang[1] + overl])
if dim == 1:
if not x_lab and not y_lab:
x_lab = star_filter.descriptors[0].LABEL
y_lab = "Probability"
plt_data = plot1DProbabSpace(
star_filter, plot_ranges, N,
searched_coords=searched_coords,
contaminatiom_coords=contamination_coords)
elif dim == 2:
if not x_lab and not y_lab:
if len(star_filter.descriptors) == 2:
x_lab = star_filter.descriptors[0].LABEL
y_lab = star_filter.descriptors[1].LABEL
else:
labels = []
for desc in star_filter.descriptors:
if hasattr(desc.LABEL, "__iter__"):
labels += desc.LABEL
else:
labels.append(desc.LABEL)
if len(labels) == 2:
x_lab = labels[0]
y_lab = labels[1]
else:
x_lab = ", ".join(labels)
y_lab = ""
plt_data = plot2DProbabSpace(star_filter, plot_ranges, N,
searched_coords=searched_coords,
contaminatiom_coords=contamination_coords)
else:
plt_data = plotNDProbabSpace(star_filter, plot_ranges, N,
searched_coords=searched_coords,
contaminatiom_coords=contamination_coords
)
# return np.array([[], [], []])
plt.xlabel(str(x_lab))
plt.ylabel(str(y_lab))
plt.title(str(title))
if opt == "show":
plt.show()
elif opt == "save":
plt.savefig(os.path.join(path, file_name))
elif opt == "return":
return plt_data
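# Usage sketch (illustrative only; `trained_filter` stands for any trained
# StarsFilter instance):
#
#   plotProbabSpace(trained_filter, opt="save", path="plots",
#                   file_name="params_space.png", title="Params space")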
def plotNDProbabSpace(star_filter, plot_ranges, N, searched_coords=[],
contaminatiom_coords=[]):
"""
Plot N dim probability space
Parameters
----------
star_filter : StarsFilter object
Trained stars filter
plot_ranges : iterable
Ranges (max/min) for all axis
N : int
Number of points per axis
searched_coords : list, iterable
List of coordinates of searched objects
contaminatiom_coords : list, iterable
List of coordinates of contamination objects
Returns
-------
tuple
x, y, Z
"""
OVERLAY = 0.4
ns = len(searched_coords)
if not isinstance(searched_coords, list):
try:
searched_coords = searched_coords.tolist()
contaminatiom_coords = contaminatiom_coords.tolist()
except:
pass
coords = searched_coords + contaminatiom_coords
if not coords:
try:
coords = star_filter.searched_coords.values.tolist() + star_filter.others_coords.values.tolist()
except:
try:
coords = star_filter.searched_coords.tolist() + star_filter.others_coords.tolist()
except:
coords = star_filter.searched_coords + star_filter.others_coords
pca = decomposition.PCA(n_components=2)
pca.fit(coords)
red_coords = pca.transform(coords)
xmax, ymax = np.max(red_coords, axis=0).tolist()
xmin, ymin = np.min(red_coords, axis=0).tolist()
xwidth = xmax - xmin
ywidth = ymax - ymin
x = np.linspace(xmin - xwidth*OVERLAY, xmax + xwidth*OVERLAY, N)
y = np.linspace(ymin - ywidth*OVERLAY, ymax + ywidth*OVERLAY, N)
X, Y = np.meshgrid(x, y)
to_transf = np.c_[X.ravel(), Y.ravel()]
back_transf_data = pca.inverse_transform(to_transf)
z = np.array(star_filter.evaluateCoordinates(back_transf_data))
Z = z.reshape(X.shape)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.pcolor(X, Y, Z)
plt.colorbar()
if coords:
s = np.array(red_coords[:ns]).T
c = np.array(red_coords[ns:]).T
plt.plot(s[0], s[1], "m*", label="Searched objects", markersize=17)
plt.plot(
c[0], c[1], "k*", label="Contamination objects", markersize=17)
plt.legend()
return x, y, Z, pca
def plot2DProbabSpace(star_filter, plot_ranges, N, searched_coords=[],
contaminatiom_coords=[]):
"""
Plot probability space
Parameters
----------
star_filter : StarsFilter object
Trained stars filter
plot_ranges : iterable
Ranges (max/min) for all axis
N : int
Number of points per axis
searched_coords : list, iterable
List of coordinates of searched objects
contaminatiom_coords : list, iterable
List of coordinates of contamination objects
Returns
-------
tuple
x, y, Z
"""
if check_depth(plot_ranges, 1, ifnotraise=False):
plot_ranges = [plot_ranges, plot_ranges]
x = np.linspace(plot_ranges[0][0], plot_ranges[0][1], N)
y = np.linspace(plot_ranges[1][0], plot_ranges[1][1], N)
X, Y = np.meshgrid(x, y)
z = np.array(star_filter.evaluateCoordinates(np.c_[X.ravel(), Y.ravel()]))
Z = z.reshape(X.shape)
plt.xlim(plot_ranges[0][0], plot_ranges[0][1])
plt.ylim(plot_ranges[1][0], plot_ranges[1][1])
plt.pcolor(X, Y, Z)
plt.colorbar()
if len(searched_coords) or len(contaminatiom_coords):
s = np.array(searched_coords).T
c = np.array(contaminatiom_coords).T
plt.plot(s[0], s[1], "m*", label="Searched objects", markersize=17)
plt.plot(
c[0], c[1], "k*", label="Contamination objects", markersize=17)
plt.legend()
return x, y, Z
def plot1DProbabSpace(star_filter, plot_ranges, N,
searched_coords=[], contaminatiom_coords=[]):
"""
Plot probability space
Parameters
----------
star_filter : StarsFilter object
Trained stars filter
plot_ranges : iterable
Ranges (max/min) for all axis
N : int
Number of points per axis
searched_coords : list, iterable
List of coordinates of searched objects
contaminatiom_coords : list, iterable
List of coordinates of contamination objects
Returns
-------
tuple
x, y
"""
if check_depth(plot_ranges, 2, ifnotraise=False):
plot_ranges = plot_ranges[0]
x = np.linspace(plot_ranges[0], plot_ranges[1])
y = star_filter.evaluateCoordinates([[y] for y in x])
plt.plot(x, y, linewidth=3)
if len(searched_coords) or len(contaminatiom_coords):
s = [qq[0] for qq in searched_coords]
c = [qq[0] for qq in contaminatiom_coords]
s_weights = np.ones_like(s) / len(s)
c_weights = np.ones_like(c) / len(c)
plt.hist(s, bins=x,
histtype='bar', weights=s_weights,
label="Searched objects")
plt.hist(c, bins=x,
histtype='bar', weights=c_weights,
label="Contamination objects")
plt.legend()
return x, np.array(y)
def plotHist(searched_coo, cont_coo, labels=[], bins=None, save_path=None,
file_name="hist.png"):
"""
Plot histogram
Parameters
----------
searched_coo : iterable
Coordinates of searched objects to plot the histogram
cont_coo : iterable
Coordinates of contamination objects to plot the histogram
labels : list, tuple of str
Labels for axis
save_path : str, NoneType
Path to the folder where plots are saved if not None, else
plots are showed immediately
bins : int, NoneType
Number of bins for histogram
file_name : str
Name of the plot file
Returns
-------
None
"""
x = np.array(searched_coo).T
y = np.array(cont_coo).T
if len(x) != len(y):
raise Exception(
"Dimension of both searched and contamination sample have to be the same.\nGot: %i, %i" % (len(x), len(y)))
if len(x) != len(labels):
warnings.warn(
"Dimension of the dimension of train sample and labels have to be the same.\nGot: %i, %i" % (len(x), len(labels)))
labels = ["" for _ in x]
for x_param, y_param, lab in zip(x, y, labels):
plt.clf()
if not bins:
x_bins = 1 + 3.32 * np.log10(len(x_param))
y_bins = 1 + 3.32 * np.log10(len(y_param))
else:
x_bins = bins
y_bins = bins
x_weights = np.ones_like(x_param) / len(x_param)
y_weights = np.ones_like(y_param) / len(y_param)
plt.hist(x_param, bins=int(x_bins), weights=x_weights,
histtype='bar', color="crimson",
label="Searched objects")
plt.hist(y_param, bins=int(y_bins), weights=y_weights,
label="Others")
plt.title("Distribution of the parameters coordinates")
plt.xlabel(lab)
plt.ylabel("Normalized counts")
plt.legend()
if save_path:
plt.savefig(os.path.join(
save_path, file_name + "_hist_%s.png" % (lab.replace(" ", "_"))))
else:
plt.show()
def plotUnsupProbabSpace(coords, decider, opt="show", N=100):
if len(coords) and len(coords[0]) == 2:
return plot2DUnsupProbabSpace(coords, decider, opt, N)
elif len(coords) and len(coords[0]) == 1:
return plot1DUnsupProbabSpace(coords, decider, opt, N)
else:
return plotNDUnsupProbabSpace(coords, decider, opt, N)
def plot2DUnsupProbabSpace(coords, decider, opt="show", N=50):
OVERLAY = 0.2
x_min, x_max = coords[:, 0].min(), coords[:, 0].max()
y_min, y_max = coords[:, 1].min(), coords[:, 1].max()
xo = (x_max - x_min) * OVERLAY
yo = (y_max - y_min) * OVERLAY
x, y = np.linspace(
x_min - xo, x_max + xo, N), np.linspace(y_min - yo, y_max + yo, N)
xx, yy = np.meshgrid(x, y)
# Obtain labels for each point in mesh. Use last trained model.
Z = decider.evaluate(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
# Plot the centroids as a white X
centroids = decider.classifier.cluster_centers_
if opt == "show":
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(coords[:, 0], coords[:, 1], 'k.', markersize=2)
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
return x, y, Z, centroids
def plot1DUnsupProbabSpace(coords, decider, opt, N):
OVERLAY = 0.2
x_min, x_max = coords[:, 0].min(), coords[:, 0].max()
xo = (x_max - x_min) * OVERLAY
x = np.linspace(x_min - xo, x_max + xo, N)
y = decider.evaluate([[xx] for xx in x])
centroids = decider.classifier.cluster_centers_
return x, y, centroids
def plotNDUnsupProbabSpace(coords, decider, opt="show", N=8):
"""
"""
OVERLAY = 0.2
if not isinstance(coords, list):
coords = list(coords)
pca = decomposition.PCA(n_components=2)
pca.fit(coords)
red_coords = pca.transform(coords)
xmax, ymax = np.max(red_coords, axis=0).tolist()
xmin, ymin = np.min(red_coords, axis=0).tolist()
xwidth = xmax-xmin
ywidth = ymax - ymin
    x = np.linspace(xmin - xwidth*OVERLAY, xmax + xwidth*OVERLAY, N)
y = np.linspace(ymin - ywidth*OVERLAY, ymax + ywidth*OVERLAY, N)
X, Y = np.meshgrid(x, y)
to_transf = np.c_[X.ravel(), Y.ravel()]
back_transf_data = pca.inverse_transform(to_transf)
z = np.array(decider.evaluate(back_transf_data))
Z = z.reshape(X.shape)
# Plot the centroids as a white X
centroids = pca.transform(decider.classifier.cluster_centers_)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.pcolor(X, Y, Z)
plt.colorbar()
if opt == "show":
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xmin, xmax, ymin, ymax),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(red_coords[:, 0], red_coords[:, 1], 'k.', markersize=2)
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('')
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.xticks(())
plt.yticks(())
plt.show()
return x, y, Z, centroids, red_coords
| mit |
loli/semisupervisedforests | sklearn/linear_model/coordinate_descent.py | 4 | 72570 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
return alphas
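# Note: for a single output and l1_ratio=1 the expression above reduces to
# alpha_max = max_j |x_j' y| / n_samples, i.e. the smallest alpha for which
# the Lasso solution is identically zero.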
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape = (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape = (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
models = []
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
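    For example, a penalty of ``0.1 * L1 + 0.05 * L2`` corresponds to
    ``alpha = 0.15`` and ``l1_ratio = 0.1 / 0.15``.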
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape = (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
        ----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
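# Illustrative sketch -- not part of the original module.  The ElasticNet
# docstring above notes that a penalty of ``a * L1 + b * L2`` corresponds to
# ``alpha = a + b`` and ``l1_ratio = a / (a + b)``.  The helper below is a
# hypothetical convenience wrapper that makes this mapping explicit.
def _elastic_net_from_l1_l2(a, b, **kwargs):
    """Build an ElasticNet whose penalty equals ``a * L1 + b * L2``."""
    total = float(a + b)
    if total <= 0:
        # alpha = 0 would be plain least squares; the docstring above advises
        # LinearRegression for that case.
        raise ValueError("a + b must be positive for a penalized model")
    return ElasticNet(alpha=total, l1_ratio=a / total, **kwargs)
# Interactive use (X, y being any regression data):
#     _elastic_net_from_l1_l2(1.0, 0.5).fit(X, y)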
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape = (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
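# Illustrative sketch -- not part of the original module.  As stated in the
# Lasso docstring, Lasso optimizes the same objective as ElasticNet with
# ``l1_ratio=1.0``; the hypothetical check below fits both on the same data
# and compares the resulting coefficients.
def _lasso_matches_enet_l1(X, y, alpha=0.1):
    """Return True if Lasso and ElasticNet(l1_ratio=1.0) agree on (X, y)."""
    import numpy as np
    lasso_coef = Lasso(alpha=alpha).fit(X, y).coef_
    enet_coef = ElasticNet(alpha=alpha, l1_ratio=1.0).fit(X, y).coef_
    return np.allclose(lasso_coef, enet_coef)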
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
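# Illustrative sketch -- not part of the original module.  The core of
# ``_path_residuals`` above is one mean squared error per alpha on the
# held-out fold; the hypothetical helper below shows that computation for the
# simple dense, single-target case.
def _fold_mse_per_alpha(X_test, y_test, coefs, intercepts):
    """MSE per alpha for ``coefs`` of shape (n_features, n_alphas)."""
    import numpy as np
    predictions = np.dot(X_test, coefs) + intercepts  # (n_samples, n_alphas)
    residues = predictions - y_test[:, np.newaxis]
    return (residues ** 2).mean(axis=0)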
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
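# Illustrative sketch -- not part of the original module.  The selection step
# in ``LinearModelCV.fit`` above boils down to an argmin over the
# fold-averaged MSE grid; the hypothetical helper below restates that logic.
def _select_best_alpha(l1_ratios, alphas, mean_mse):
    """Return (best_l1_ratio, best_alpha) minimizing the averaged MSE.
    ``alphas`` and ``mean_mse`` both have shape (n_l1_ratio, n_alphas),
    matching the arrays built inside ``fit``.
    """
    import numpy as np
    best_l1_ratio, best_alpha, best_mse = None, None, np.inf
    for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse):
        i = np.argmin(mse_alphas)
        if mse_alphas[i] < best_mse:
            best_mse = mse_alphas[i]
            best_l1_ratio = l1_ratio
            best_alpha = l1_alphas[i]
    return best_l1_ratio, best_alpha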
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape = (n_targets,)
independent term in decision function.
mse_path_ : array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape = (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
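# Illustrative sketch -- not part of the original module.  A minimal LassoCV
# usage example on synthetic data; the dataset shape and random seed are
# arbitrary choices for the sketch.
def _lasso_cv_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    model = LassoCV(cv=3).fit(X, y)
    # ``alpha_`` is the penalty chosen by cross-validation; ``mse_path_``
    # holds one column of MSE values per fold along the alpha grid.
    return model.alpha_, model.mse_path_.shape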
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape = (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape = (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape = (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
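# Illustrative sketch -- not part of the original module.  The ElasticNetCV
# docstring above recommends passing several ``l1_ratio`` values biased
# towards 1; the hypothetical example below does exactly that.
def _elastic_net_cv_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(60)
    model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3)
    model.fit(X, y)
    # Both the penalty strength and the mixing parameter are picked by CV.
    return model.alpha_, model.l1_ratio_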
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape = (n_tasks,)
Independent term in decision function.
coef_ : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
        ----------
X : ndarray, shape = (n_samples, n_features)
Data
y : ndarray, shape = (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
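# Illustrative sketch -- not part of the original module.  The L1/L2 mixed
# norm ||W||_21 in the multi-task objectives above is simply the sum of the
# Euclidean norms of the rows of W.
def _l21_norm(W):
    """Compute ||W||_21 = sum_i sqrt(sum_j W[i, j] ** 2)."""
    import numpy as np
    W = np.asarray(W)
    return np.sqrt((W ** 2).sum(axis=1)).sum()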
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape = (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
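# Illustrative sketch -- not part of the original module.  The point of the
# L1/L2 penalty is joint feature selection: a feature is either kept for all
# tasks or dropped for all tasks.  The hypothetical helper below makes that
# visible on a fitted multi-task model.
def _jointly_selected_features(model, tol=1e-12):
    """Indices of features with a non-zero coefficient in at least one task."""
    import numpy as np
    coef = np.asarray(model.coef_)  # shape (n_tasks, n_features)
    return np.where(np.abs(coef).max(axis=0) > tol)[0]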
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
loli/semisupervisedforests | sklearn/feature_selection/tests/test_rfe.py | 6 | 6754 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
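# Illustrative sketch -- not part of the original test module.  The ``step``
# semantics exercised in ``test_rfe_min_step`` above: a float in (0, 1) is
# read as a fraction of the initial number of features, rounded down, with at
# least one feature removed per iteration.  The helper below is a hypothetical
# restatement of that rule (the real RFE also never removes more features
# than needed to reach ``n_features_to_select``).
def _features_removed_per_iteration(step, n_features):
    """Number of features RFE drops per iteration for a given ``step``."""
    if 0.0 < step < 1.0:
        return max(1, int(step * n_features))
    return int(step)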
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/numpy/lib/tests/test_function_base.py | 19 | 122857 | from __future__ import division, absolute_import, print_function
import operator
import warnings
import sys
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises,
assert_allclose, assert_array_max_ulp, assert_warns,
assert_raises_regex, dec, suppress_warnings
)
from numpy.testing.utils import HAS_REFCOUNT
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
unwrap, unique, vectorize
)
from numpy.compat import long
def get_mat(n):
data = np.arange(n)
data = np.add.outer(data, data)
return data
class TestRot90(TestCase):
def test_basic(self):
self.assertRaises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))
a = [[0, 1, 2],
[3, 4, 5]]
b1 = [[2, 5],
[1, 4],
[0, 3]]
b2 = [[5, 4, 3],
[2, 1, 0]]
b3 = [[3, 0],
[4, 1],
[5, 2]]
b4 = [[0, 1, 2],
[3, 4, 5]]
for k in range(-3, 13, 4):
assert_equal(rot90(a, k=k), b1)
for k in range(-2, 13, 4):
assert_equal(rot90(a, k=k), b2)
for k in range(-1, 13, 4):
assert_equal(rot90(a, k=k), b3)
for k in range(0, 13, 4):
assert_equal(rot90(a, k=k), b4)
assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)
assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))
def test_axes(self):
a = np.ones((50, 40, 3))
assert_equal(rot90(a).shape, (40, 50, 3))
assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))
assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))
def test_rotation_axes(self):
a = np.arange(8).reshape((2,2,2))
a_rot90_01 = [[[2, 3],
[6, 7]],
[[0, 1],
[4, 5]]]
a_rot90_12 = [[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]]
a_rot90_20 = [[[4, 0],
[6, 2]],
[[5, 1],
[7, 3]]]
a_rot90_10 = [[[4, 5],
[0, 1]],
[[6, 7],
[2, 3]]]
assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)
for k in range(1,5):
assert_equal(rot90(a, k=k, axes=(2, 0)),
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
class TestFlip(TestCase):
def test_axes(self):
self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
def test_basic_lr(self):
a = get_mat(4)
b = a[:, ::-1]
assert_equal(np.flip(a, 1), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[2, 1, 0],
[5, 4, 3]]
assert_equal(np.flip(a, 1), b)
def test_basic_ud(self):
a = get_mat(4)
b = a[::-1, :]
assert_equal(np.flip(a, 0), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[3, 4, 5],
[0, 1, 2]]
assert_equal(np.flip(a, 0), b)
def test_3d_swap_axis0(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
assert_equal(np.flip(a, 0), b)
def test_3d_swap_axis1(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
assert_equal(np.flip(a, 1), b)
def test_3d_swap_axis2(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[1, 0],
[3, 2]],
[[5, 4],
[7, 6]]])
assert_equal(np.flip(a, 2), b)
def test_4d(self):
a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
for i in range(a.ndim):
assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
class TestAny(TestCase):
def test_basic(self):
y1 = [0, 0, 1, 0]
y2 = [0, 0, 0, 0]
y3 = [1, 0, 1, 0]
assert_(np.any(y1))
assert_(np.any(y3))
assert_(not np.any(y2))
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
class TestAll(TestCase):
def test_basic(self):
y1 = [0, 1, 1, 0]
y2 = [0, 0, 0, 0]
y3 = [1, 1, 1, 1]
assert_(not np.all(y1))
assert_(np.all(y3))
assert_(not np.all(y2))
assert_(np.all(~np.array(y2)))
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
class TestCopy(TestCase):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
a_copy = np.copy(a)
assert_array_equal(a, a_copy)
a_copy[0, 0] = 10
assert_equal(a[0, 0], 1)
assert_equal(a_copy[0, 0], 10)
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
# https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
a = np.array([[1, 2], [3, 4]])
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
a_fort = np.array([[1, 2], [3, 4]], order="F")
assert_(not a_fort.flags.c_contiguous)
assert_(a_fort.flags.f_contiguous)
a_copy = np.copy(a)
assert_(a_copy.flags.c_contiguous)
assert_(not a_copy.flags.f_contiguous)
a_fort_copy = np.copy(a_fort)
assert_(not a_fort_copy.flags.c_contiguous)
assert_(a_fort_copy.flags.f_contiguous)
class TestAverage(TestCase):
def test_basic(self):
y1 = np.array([1, 2, 3])
assert_(average(y1, axis=0) == 2.)
y2 = np.array([1., 2., 3.])
assert_(average(y2, axis=0) == 2.)
y3 = [0., 0., 0.]
assert_(average(y3, axis=0) == 0.)
y4 = np.ones((4, 4))
y4[0, 1] = 0
y4[1, 0] = 2
assert_almost_equal(y4.mean(0), average(y4, 0))
assert_almost_equal(y4.mean(1), average(y4, 1))
y5 = rand(5, 5)
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
y6 = np.matrix(rand(5, 5))
assert_array_equal(y6.mean(0), average(y6, 0))
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
actual = average(y, weights=w)
desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()
assert_almost_equal(actual, desired)
y1 = np.array([[1, 2, 3], [4, 5, 6]])
w0 = [1, 2]
actual = average(y1, weights=w0, axis=0)
desired = np.array([3., 4., 5.])
assert_almost_equal(actual, desired)
w1 = [0, 0, 1]
actual = average(y1, weights=w1, axis=1)
desired = np.array([3., 6.])
assert_almost_equal(actual, desired)
        # This should raise an error. Can we test for that?
# assert_equal(average(y1, weights=w1), 9./2.)
# 2D Case
w2 = [[0, 0, 1], [0, 0, 2]]
desired = np.array([3., 6.])
assert_array_equal(average(y1, weights=w2, axis=1), desired)
assert_equal(average(y1, weights=w2), 5.)
y3 = rand(5).astype(np.float32)
w3 = rand(5).astype(np.float64)
assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))
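# Illustrative sketch (not part of the test suite): np.average with weights
# implements the usual weighted mean sum(a*w)/sum(w). The underscore-prefixed
# sample values are arbitrary.
import numpy as np
_a = np.array([1.0, 2.0, 3.0])
_w = np.array([3.0, 1.0, 1.0])
assert np.isclose(np.average(_a, weights=_w), (_a * _w).sum() / _w.sum())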
def test_returned(self):
y = np.array([[1, 2, 3], [4, 5, 6]])
# No weights
avg, scl = average(y, returned=True)
assert_equal(scl, 6.)
avg, scl = average(y, 0, returned=True)
assert_array_equal(scl, np.array([2., 2., 2.]))
avg, scl = average(y, 1, returned=True)
assert_array_equal(scl, np.array([3., 3.]))
# With weights
w0 = [1, 2]
avg, scl = average(y, weights=w0, axis=0, returned=True)
assert_array_equal(scl, np.array([3., 3., 3.]))
w1 = [1, 2, 3]
avg, scl = average(y, weights=w1, axis=1, returned=True)
assert_array_equal(scl, np.array([6., 6.]))
w2 = [[0, 0, 1], [1, 2, 3]]
avg, scl = average(y, weights=w2, axis=1, returned=True)
assert_array_equal(scl, np.array([1., 6.]))
def test_subclasses(self):
class subclass(np.ndarray):
pass
a = np.array([[1,2],[3,4]]).view(subclass)
w = np.array([[1,2],[3,4]]).view(subclass)
with suppress_warnings() as sup:
# Note that the warning is spurious, because the test checks
# for weights while a is ignored.
sup.filter(FutureWarning, "np.average currently does not preserve")
assert_equal(type(np.average(a, weights=w)), subclass)
# also test matrices
a = np.matrix([[1,2],[3,4]])
w = np.matrix([[1,2],[3,4]])
r = np.average(a, axis=0, weights=w)
assert_equal(type(r), np.matrix)
assert_equal(r, [[2.5, 10.0/3]])
def test_upcasting(self):
types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
for at, wt, rt in types:
a = np.array([[1,2],[3,4]], dtype=at)
w = np.array([[1,2],[3,4]], dtype=wt)
assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
class TestSelect(TestCase):
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
conditions = [np.array([False, False, False]),
np.array([False, True, False]),
np.array([False, False, True])]
def _select(self, cond, values, default=0):
output = []
for m in range(len(cond)):
output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
return output
def test_basic(self):
choices = self.choices
conditions = self.conditions
assert_array_equal(select(conditions, choices, default=15),
self._select(conditions, choices, default=15))
assert_equal(len(choices), 3)
assert_equal(len(conditions), 3)
def test_broadcasting(self):
conditions = [np.array(True), np.array([False, True, False])]
choices = [1, np.arange(12).reshape(4, 3)]
assert_array_equal(select(conditions, choices), np.ones((4, 3)))
# default can broadcast too:
assert_equal(select([True], [0], default=[0]).shape, (1,))
def test_return_dtype(self):
assert_equal(select(self.conditions, self.choices, 1j).dtype,
np.complex_)
# But the conditions need to be stronger than the scalar default
# if it is scalar.
choices = [choice.astype(np.int8) for choice in self.choices]
assert_equal(select(self.conditions, choices).dtype, np.int8)
d = np.array([1, 2, 3, np.nan, 5, 7])
m = np.isnan(d)
assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
def test_deprecated_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_equal(select([], [], 3j), 3j)
with warnings.catch_warnings():
warnings.simplefilter("always")
assert_warns(DeprecationWarning, select, [], [])
warnings.simplefilter("error")
assert_raises(DeprecationWarning, select, [], [])
def test_non_bool_deprecation(self):
choices = self.choices
conditions = self.conditions[:]
with warnings.catch_warnings():
warnings.filterwarnings("always")
conditions[0] = conditions[0].astype(np.int_)
assert_warns(DeprecationWarning, select, conditions, choices)
conditions[0] = conditions[0].astype(np.uint8)
assert_warns(DeprecationWarning, select, conditions, choices)
warnings.filterwarnings("error")
assert_raises(DeprecationWarning, select, conditions, choices)
def test_many_arguments(self):
# This used to be limited by NPY_MAXARGS == 32
conditions = [np.array([False])] * 100
choices = [np.array([1])] * 100
select(conditions, choices)
class TestInsert(TestCase):
def test_basic(self):
a = [1, 2, 3]
assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
b = np.array([0, 1], dtype=np.float64)
assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
assert_equal(insert(b, [], []), b)
# Bools will be treated differently in the future:
# assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_equal(
insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])
assert_(w[0].category is FutureWarning)
def test_multidim(self):
a = [[1, 1, 1]]
r = [[2, 2, 2],
[1, 1, 1]]
assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
assert_equal(insert(a, 0, 2, axis=0), r)
assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.arange(1, 4).repeat(3).reshape(3, 3)
c = np.concatenate(
(a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
a[:, 1:2]), axis=1)
assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
# scalars behave differently, in this case exactly opposite:
assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
a = np.arange(4).reshape(2, 2)
assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:,:, 3], axis=-1),
insert(a, 1, a[:,:, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2,:], axis=-2),
insert(a, 1, a[:, 2,:], axis=1))
# invalid axis value
assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3)
assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
insert(a, 1, a[:, :, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
insert(a, 1, a[:, 2, :], axis=1))
def test_0d(self):
# This will be an error in the future
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(insert(a, [], 2, axis=0), np.array(2))
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = np.arange(10).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
assert_(isinstance(np.insert(a, [], []), SubClass))
assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
# This will be an error in the future:
a = np.array(1).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
def test_index_array_copied(self):
x = np.array([1, 1, 1])
np.insert([0, 1, 2], x, [3, 4, 5])
assert_equal(x, np.array([1, 1, 1]))
def test_structured_array(self):
a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
dtype=[('foo', 'i'), ('bar', 'a1')])
val = (4, 'd')
b = np.insert(a, 0, val)
assert_array_equal(b[0], np.array(val, dtype=b.dtype))
val = [(4, 'd')] * 2
b = np.insert(a, [0, 2], val)
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
class TestAmax(TestCase):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.amax(a), 10.0)
b = [[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]]
assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
class TestAmin(TestCase):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.amin(a), -5.0)
b = [[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]]
assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
class TestPtp(TestCase):
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
assert_equal(a.ptp(axis=0), 15.0)
b = np.array([[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]])
assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
class TestCumsum(TestCase):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
assert_array_equal(np.cumsum(a, axis=0), tgt)
tgt = np.array(
[[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
assert_array_equal(np.cumsum(a2, axis=0), tgt)
tgt = np.array(
[[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
assert_array_equal(np.cumsum(a2, axis=1), tgt)
class TestProd(TestCase):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, np.prod, a)
self.assertRaises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
class TestCumprod(TestCase):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, np.cumprod, a)
self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
self.assertRaises(ArithmeticError, np.cumprod, a)
else:
assert_array_equal(np.cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
1320, 6600, 26400], ctype))
assert_array_equal(np.cumprod(a2, axis=0),
np.array([[1, 2, 3, 4],
[5, 12, 21, 36],
[50, 36, 84, 180]], ctype))
assert_array_equal(np.cumprod(a2, axis=-1),
np.array([[1, 2, 6, 24],
[5, 30, 210, 1890],
[10, 30, 120, 600]], ctype))
class TestDiff(TestCase):
def test_basic(self):
x = [1, 4, 6, 7, 12]
out = np.array([3, 2, 1, 5])
out2 = np.array([-1, -1, 4])
out3 = np.array([0, 5])
assert_array_equal(diff(x), out)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
out2 = out1[:, :, 1:] - out1[:, :, :-1]
out3 = x[1:, :, :] - x[:-1, :, :]
out4 = out3[1:, :, :] - out3[:-1, :, :]
assert_array_equal(diff(x), out1)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
class TestDelete(TestCase):
def setUp(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
def _check_inverse_of_slicing(self, indices):
a_del = delete(self.a, indices)
nd_a_del = delete(self.nd_a, indices, axis=1)
msg = 'Delete failed for obj: %r' % indices
# NOTE: The cast should be removed after warning phase for bools
if not isinstance(indices, (slice, int, long, np.integer)):
indices = np.asarray(indices, dtype=np.intp)
indices = indices[(indices >= 0) & (indices < 5)]
assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
err_msg=msg)
xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg)
def test_slices(self):
lims = [-6, -2, 0, 1, 2, 4, 5]
steps = [-3, -1, 1, 3]
for start in lims:
for stop in lims:
for step in steps:
s = slice(start, stop, step)
self._check_inverse_of_slicing(s)
def test_fancy(self):
# Deprecation/FutureWarning tests should be kept after change.
self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
with warnings.catch_warnings():
warnings.filterwarnings('error', category=DeprecationWarning)
assert_raises(DeprecationWarning, delete, self.a, [100])
assert_raises(DeprecationWarning, delete, self.a, [-100])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=FutureWarning)
self._check_inverse_of_slicing([0, -1, 2, 2])
obj = np.array([True, False, False], dtype=bool)
self._check_inverse_of_slicing(obj)
assert_(w[0].category is FutureWarning)
assert_(w[1].category is FutureWarning)
def test_single(self):
self._check_inverse_of_slicing(0)
self._check_inverse_of_slicing(-4)
def test_0d(self):
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(delete(a, [], axis=0), a)
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = self.a.view(SubClass)
assert_(isinstance(delete(a, 0), SubClass))
assert_(isinstance(delete(a, []), SubClass))
assert_(isinstance(delete(a, [0, 1]), SubClass))
assert_(isinstance(delete(a, slice(1, 2)), SubClass))
assert_(isinstance(delete(a, slice(1, -2)), SubClass))
def test_array_order_preserve(self):
# See gh-7113
k = np.arange(10).reshape(2, 5, order='F')
m = delete(k, slice(60, None), axis=1)
# 'k' is Fortran ordered, and 'm' should have the
# same ordering as 'k' and NOT become C ordered
assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
class TestGradient(TestCase):
def test_basic(self):
v = [[1, 1], [3, 4]]
x = np.array(v)
dx = [np.array([[2., 3.], [2., 3.]]),
np.array([[0., 0.], [1., 1.]])]
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
def test_badargs(self):
# for 2D array, gradient can take 0, 1, or 2 extra args
x = np.array([[1, 1], [3, 4]])
assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
np.array([1., 1.]), np.array([1., 1.]))
# disallow arrays as distances, see gh-6847
assert_raises(ValueError, gradient, np.arange(5), np.ones(5))
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
x = np.ma.array([[1, 1], [3, 4]],
mask=[[False, False], [False, False]])
out = gradient(x)[0]
assert_equal(type(out), type(x))
# And make sure that the output and input don't have aliased mask
# arrays
assert_(x.mask is not out.mask)
# Also check that edge_order=2 doesn't alter the original mask
x2 = np.ma.arange(5)
x2[2] = np.ma.masked
np.gradient(x2, edge_order=2)
assert_array_equal(x2.mask, [False, False, True, False, False])
def test_datetime64(self):
# Make sure gradient() can handle special types like datetime64
x = np.array(
['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
'1910-10-12', '1910-12-12', '1912-12-12'],
dtype='datetime64[D]')
dx = np.array(
[-5, -3, 0, 31, 61, 396, 731],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_timedelta64(self):
# Make sure gradient() can handle special types like timedelta64
x = np.array(
[-5, -3, 10, 12, 61, 321, 300],
dtype='timedelta64[D]')
dx = np.array(
[2, 7, 7, 25, 154, 119, -21],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_second_order_accurate(self):
# Testing that the relative numerical error is less than 3% for
# this example problem. This corresponds to second-order
# accurate finite differences for all interior and boundary
# points.
x = np.linspace(0, 1, 10)
dx = x[1] - x[0]
y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
analytical = 6 * x ** 2 + 8 * x + 2
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
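# Rough sketch (illustrative, not part of the test suite) of what np.gradient
# computes for interior points: central differences over twice the spacing.
# Boundary handling differs and is not shown; the sample data is arbitrary.
import numpy as np
_x = np.linspace(0.0, 1.0, 11)
_y = _x ** 2
_dx = _x[1] - _x[0]
_central = (_y[2:] - _y[:-2]) / (2 * _dx)  # interior central differences
assert np.allclose(np.gradient(_y, _dx)[1:-1], _central)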
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
x = np.array(v)
dx = [np.array([[2., 3.], [2., 3.]]),
np.array([[0., 0.], [1., 1.]])]
assert_array_equal(gradient(x, axis=0), dx[0])
assert_array_equal(gradient(x, axis=1), dx[1])
assert_array_equal(gradient(x, axis=-1), dx[1])
assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])
# test axis=None which means all axes
assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
# and is the same as no axis keyword given
assert_almost_equal(gradient(x, axis=None), gradient(x))
# test vararg order
assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
# test maximal number of varargs
assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)
assert_raises(ValueError, gradient, x, axis=3)
assert_raises(ValueError, gradient, x, axis=-3)
assert_raises(TypeError, gradient, x, axis=[1,])
class TestAngle(TestCase):
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
y = angle(x)
yo = [
np.arctan(3.0 / 1.0),
np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
-np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
z = angle(x, deg=1)
zo = np.array(yo) * 180 / np.pi
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
class TestTrimZeros(TestCase):
"""
Only testing for integer splits.
"""
def test_basic(self):
a = np.array([0, 0, 1, 2, 3, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 2, 3, 4]))
def test_leading_skip(self):
a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 0, 2, 3, 4]))
def test_trailing_skip(self):
a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
class TestExtins(TestCase):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
b = extract(a > 1, a)
assert_array_equal(b, [3, 2, 2, 3, 3])
def test_place(self):
# Make sure that non-np.ndarray objects
# raise an error instead of doing nothing
assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])
a = np.array([1, 4, 3, 2, 5, 8, 7])
place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
place(a, np.zeros(7), [])
assert_array_equal(a, np.arange(1, 8))
place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
assert_raises_regex(ValueError, "Cannot insert from an empty array",
lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))
# See Issue #6974
a = np.array(['12', '34'])
place(a, [0, 1], '9')
assert_array_equal(a, ['12', '9'])
def test_both(self):
a = rand(10)
mask = a > 0.5
ac = a.copy()
c = extract(mask, a)
place(a, mask, 0)
place(a, mask, c)
assert_array_equal(a, ac)
class TestVectorize(TestCase):
def test_simple(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], [1, 3, 5, 7])
assert_array_equal(r, [1, 6, 1, 2])
def test_scalar(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], 5)
assert_array_equal(r, [5, 8, 1, 4])
def test_large(self):
x = np.linspace(-3, 2, 10000)
f = vectorize(lambda x: x)
y = f(x)
assert_array_equal(y, x)
def test_ufunc(self):
import math
f = vectorize(math.cos)
args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
r1 = f(args)
r2 = np.cos(args)
assert_array_almost_equal(r1, r2)
def test_keywords(self):
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(args, 2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords_no_func_code(self):
# This needs to test a function that has keywords but
# no func_code attribute, since otherwise vectorize will
# inspect the func_code.
import random
try:
vectorize(random.randrange) # Should succeed
except:
raise AssertionError()
def test_keywords2_ticket_2100(self):
# Test kwarg support: enhancement ticket 2100
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(a=args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(b=1, a=args)
assert_array_equal(r1, r2)
r1 = f(args, b=2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords3_ticket_2100(self):
# Test excluded with mixed positional and kwargs: ticket 2100
def mypolyval(x, p):
_p = list(p)
res = _p.pop(0)
while _p:
res = res * x + _p.pop(0)
return res
vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
ans = [3, 6]
assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
def test_keywords4_ticket_2100(self):
# Test vectorizing a function with no positional args.
@vectorize
def f(**kw):
res = 1.0
for _k in kw:
res *= kw[_k]
return res
assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
def test_keywords5_ticket_2100(self):
# Test vectorizing a function with no keyword args.
@vectorize
def f(*v):
return np.prod(v)
assert_array_equal(f([1, 2], [3, 4]), [3, 8])
def test_coverage1_ticket_2100(self):
def foo():
return 1
f = vectorize(foo)
assert_array_equal(f(), 1)
def test_assigning_docstring(self):
def foo(x):
"""Original documentation"""
return x
f = vectorize(foo)
assert_equal(f.__doc__, foo.__doc__)
doc = "Provided documentation"
f = vectorize(foo, doc=doc)
assert_equal(f.__doc__, doc)
def test_UnboundMethod_ticket_1156(self):
# Regression test for issue 1156
class Foo:
b = 2
def bar(self, a):
return a ** self.b
assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
np.arange(9) ** 2)
assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
np.arange(9) ** 2)
def test_execution_order_ticket_1487(self):
# Regression test for dependence on execution order: issue 1487
f1 = vectorize(lambda x: x)
res1a = f1(np.arange(3))
res1b = f1(np.arange(0.1, 3))
f2 = vectorize(lambda x: x)
res2b = f2(np.arange(0.1, 3))
res2a = f2(np.arange(3))
assert_equal(res1a, res2a)
assert_equal(res1b, res2b)
def test_string_ticket_1892(self):
# Test vectorization over strings: issue 1892.
f = np.vectorize(lambda x: x)
s = '0123456789' * 10
assert_equal(s, f(s))
def test_cache(self):
# Ensure that the vectorized func is called exactly once per argument.
_calls = [0]
@vectorize
def f(x):
_calls[0] += 1
return x ** 2
f.cache = True
x = np.arange(5)
assert_array_equal(f(x), x * x)
assert_equal(_calls[0], len(x))
def test_otypes(self):
f = np.vectorize(lambda x: x)
f.otypes = 'i'
x = np.arange(5)
assert_array_equal(f(x), x)
def test_parse_gufunc_signature(self):
assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))
assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),
([('x', 'y')], [()]))
assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),
([('x',), ('y',)], [()]))
assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),
([('x',)], [('y',)]))
assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x)(y)->()')
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x),(y)->')
with assert_raises(ValueError):
nfb._parse_gufunc_signature('((x))->(x)')
def test_signature_simple(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract, signature='(),()->()')
r = f([0, 3, 6, 9], [1, 3, 5, 7])
assert_array_equal(r, [1, 6, 1, 2])
def test_signature_mean_last(self):
def mean(a):
return a.mean()
f = vectorize(mean, signature='(n)->()')
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [2, 3])
def test_signature_center(self):
def center(a):
return a - a.mean()
f = vectorize(center, signature='(n)->(n)')
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [[-1, 1], [-1, 1]])
def test_signature_two_outputs(self):
f = vectorize(lambda x: (x, x), signature='()->(),()')
r = f([1, 2, 3])
assert_(isinstance(r, tuple) and len(r) == 2)
assert_array_equal(r[0], [1, 2, 3])
assert_array_equal(r[1], [1, 2, 3])
def test_signature_outer(self):
f = vectorize(np.outer, signature='(a),(b)->(a,b)')
r = f([1, 2], [1, 2, 3])
assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])
r = f([[[1, 2]]], [1, 2, 3])
assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])
r = f([[1, 0], [2, 0]], [1, 2, 3])
assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],
[[2, 4, 6], [0, 0, 0]]])
r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],
[[0, 0, 0], [0, 0, 0]]])
def test_signature_computed_size(self):
f = vectorize(lambda x: x[:-1], signature='(n)->(m)')
r = f([1, 2, 3])
assert_array_equal(r, [1, 2])
r = f([[1, 2, 3], [2, 3, 4]])
assert_array_equal(r, [[1, 2], [2, 3]])
def test_signature_excluded(self):
def foo(a, b=1):
return a + b
f = vectorize(foo, signature='()->()', excluded={'b'})
assert_array_equal(f([1, 2, 3]), [2, 3, 4])
assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])
def test_signature_otypes(self):
f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])
r = f([1, 2, 3])
assert_equal(r.dtype, np.dtype('float64'))
assert_array_equal(r, [1, 2, 3])
def test_signature_invalid_inputs(self):
f = vectorize(operator.add, signature='(n),(n)->(n)')
with assert_raises_regex(TypeError, 'wrong number of positional'):
f([1, 2])
with assert_raises_regex(
ValueError, 'does not have enough dimensions'):
f(1, 2)
with assert_raises_regex(
ValueError, 'inconsistent size for core dimension'):
f([1, 2], [1, 2, 3])
f = vectorize(operator.add, signature='()->()')
with assert_raises_regex(TypeError, 'wrong number of positional'):
f(1, 2)
def test_signature_invalid_outputs(self):
f = vectorize(lambda x: x[:-1], signature='(n)->(n)')
with assert_raises_regex(
ValueError, 'inconsistent size for core dimension'):
f([1, 2, 3])
f = vectorize(lambda x: x, signature='()->(),()')
with assert_raises_regex(ValueError, 'wrong number of outputs'):
f(1)
f = vectorize(lambda x: (x, x), signature='()->()')
with assert_raises_regex(ValueError, 'wrong number of outputs'):
f([1, 2])
def test_size_zero_output(self):
# see issue 5868
f = np.vectorize(lambda x: x)
x = np.zeros([0, 5], dtype=int)
with assert_raises_regex(ValueError, 'otypes'):
f(x)
f.otypes = 'i'
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='()->()')
with assert_raises_regex(ValueError, 'otypes'):
f(x)
f = np.vectorize(lambda x: x, signature='()->()', otypes='i')
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='(n)->(n)')
assert_array_equal(f(x.T), x.T)
f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')
with assert_raises_regex(ValueError, 'new output dimensions'):
f(x)
class TestDigitize(TestCase):
def test_forward(self):
x = np.arange(-6, 5)
bins = np.arange(-5, 5)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(5, -5, -1)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_random(self):
x = rand(10)
bin = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bin) != 0))
def test_right_basic(self):
x = [1, 5, 4, 10, 8, 11, 0]
bins = [1, 5, 10]
default_answer = [1, 2, 1, 3, 2, 3, 0]
assert_array_equal(digitize(x, bins), default_answer)
right_answer = [0, 1, 1, 2, 2, 3, 0]
assert_array_equal(digitize(x, bins, True), right_answer)
def test_right_open(self):
x = np.arange(-6, 5)
bins = np.arange(-6, 4)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(4, -6, -1)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_random(self):
x = rand(10)
bins = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bins, True) != 10))
def test_monotonic(self):
x = [-1, 0, 1, 2]
bins = [0, 0, 1]
assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
bins = [1, 1, 0]
assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
bins = [1, 1, 1, 1]
assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
bins = [0, 0, 1, 0]
assert_raises(ValueError, digitize, x, bins)
bins = [1, 1, 0, 1]
assert_raises(ValueError, digitize, x, bins)
def test_casting_error(self):
x = [1, 2, 3 + 1.j]
bins = [1, 2, 3]
assert_raises(TypeError, digitize, x, bins)
x, bins = bins, x
assert_raises(TypeError, digitize, x, bins)
def test_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
assert_(not isinstance(digitize(b, a, False), A))
assert_(not isinstance(digitize(b, a, True), A))
class TestUnwrap(TestCase):
def test_simple(self):
# check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
# check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
class TestFilterwindows(TestCase):
def test_hanning(self):
# check symmetry
w = hanning(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
def test_hamming(self):
# check symmetry
w = hamming(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
def test_bartlett(self):
# check symmetry
w = bartlett(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
def test_blackman(self):
# check symmetry
w = blackman(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
class TestTrapz(TestCase):
def test_simple(self):
x = np.arange(-10, 10, .1)
r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
# check integral of normal equals 1
assert_almost_equal(r, 1, 7)
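# Sketch (illustrative only) of the composite trapezoidal rule that np.trapz
# applies on a uniformly spaced grid: sum of 0.5*(y[i] + y[i+1])*dx. The
# underscore-prefixed sample data is arbitrary.
import numpy as np
_x = np.linspace(0.0, np.pi, 101)
_y = np.sin(_x)
_dx = _x[1] - _x[0]
_manual = (0.5 * (_y[1:] + _y[:-1]) * _dx).sum()
assert np.isclose(np.trapz(_y, _x), _manual)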
def test_ndim(self):
x = np.linspace(0, 1, 3)
y = np.linspace(0, 2, 8)
z = np.linspace(0, 3, 13)
wx = np.ones_like(x) * (x[1] - x[0])
wx[0] /= 2
wx[-1] /= 2
wy = np.ones_like(y) * (y[1] - y[0])
wy[0] /= 2
wy[-1] /= 2
wz = np.ones_like(z) * (z[1] - z[0])
wz[0] /= 2
wz[-1] /= 2
q = x[:, None, None] + y[None,:, None] + z[None, None,:]
qx = (q * wx[:, None, None]).sum(axis=0)
qy = (q * wy[None, :, None]).sum(axis=1)
qz = (q * wz[None, None, :]).sum(axis=2)
# n-d `x`
r = trapz(q, x=x[:, None, None], axis=0)
assert_almost_equal(r, qx)
r = trapz(q, x=y[None,:, None], axis=1)
assert_almost_equal(r, qy)
r = trapz(q, x=z[None, None,:], axis=2)
assert_almost_equal(r, qz)
# 1-d `x`
r = trapz(q, x=x, axis=0)
assert_almost_equal(r, qx)
r = trapz(q, x=y, axis=1)
assert_almost_equal(r, qy)
r = trapz(q, x=z, axis=2)
assert_almost_equal(r, qz)
def test_masked(self):
# Testing that masked arrays behave as if the function is 0 where
# masked
x = np.arange(5)
y = x * x
mask = x == 2
ym = np.ma.array(y, mask=mask)
r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
assert_almost_equal(trapz(ym, x), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(ym, xm), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
def test_matrix(self):
# Test to make sure matrices give the same answer as ndarrays
x = np.linspace(0, 5)
y = x * x
r = trapz(y, x)
mx = np.matrix(x)
my = np.matrix(y)
mr = trapz(my, mx)
assert_almost_equal(mr, r)
class TestSinc(TestCase):
def test_simple(self):
assert_(sinc(0) == 1)
w = sinc(np.linspace(-1, 1, 100))
# check symmetry
assert_array_almost_equal(w, flipud(w), 7)
def test_array_like(self):
x = [0, 0.5]
y1 = sinc(np.array(x))
y2 = sinc(list(x))
y3 = sinc(tuple(x))
assert_array_equal(y1, y2)
assert_array_equal(y1, y3)
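# Sketch (illustrative only) of the normalized sinc definition np.sinc uses:
# sin(pi*x)/(pi*x), with x == 0 handled specially (it returns 1 there). The
# sample points below deliberately avoid zero.
import numpy as np
_x = np.linspace(0.1, 2.0, 20)
assert np.allclose(np.sinc(_x), np.sin(np.pi * _x) / (np.pi * _x))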
class TestHistogram(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_simple(self):
n = 100
v = rand(n)
(a, b) = histogram(v)
# check if the sum of the bins equals the number of samples
assert_equal(np.sum(a, axis=0), n)
# check that the bin counts are evenly spaced when the data is from
# a linear function
(a, b) = histogram(np.linspace(0, 10, 100))
assert_array_equal(a, 10)
def test_one_bin(self):
# Ticket 632
hist, edges = histogram([1, 2, 3, 4], [1, 2])
assert_array_equal(hist, [2, ])
assert_array_equal(edges, [1, 2])
assert_raises(ValueError, histogram, [1, 2], bins=0)
h, e = histogram([1, 2], bins=1)
assert_equal(h, np.array([2]))
assert_allclose(e, np.array([1., 2.]))
def test_normed(self):
# Check that the integral of the density equals 1.
n = 100
v = rand(n)
a, b = histogram(v, normed=True)
area = np.sum(a * diff(b))
assert_almost_equal(area, 1)
# Check with non-constant bin widths (buggy but backwards
# compatible)
v = np.arange(10)
bins = [0, 1, 5, 9, 10]
a, b = histogram(v, bins, normed=True)
area = np.sum(a * diff(b))
assert_almost_equal(area, 1)
def test_density(self):
# Check that the integral of the density equals 1.
n = 100
v = rand(n)
a, b = histogram(v, density=True)
area = np.sum(a * diff(b))
assert_almost_equal(area, 1)
# Check with non-constant bin widths
v = np.arange(10)
bins = [0, 1, 3, 6, 10]
a, b = histogram(v, bins, density=True)
assert_array_equal(a, .1)
assert_equal(np.sum(a * diff(b)), 1)
# Variable bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
bins = [0, 1, 3, 6, np.inf]
a, b = histogram(v, bins, density=True)
assert_array_equal(a, [.1, .1, .1, 0.])
# Taken from a bug report from N. Becker on the numpy-discussion
# mailing list Aug. 6, 2010.
counts, dmy = np.histogram(
[1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
assert_equal(counts, [.25, 0])
def test_outliers(self):
# Check that outliers are not tallied
a = np.arange(10) + .5
# Lower outliers
h, b = histogram(a, range=[0, 9])
assert_equal(h.sum(), 9)
# Upper outliers
h, b = histogram(a, range=[1, 10])
assert_equal(h.sum(), 9)
# Normalization
h, b = histogram(a, range=[1, 9], normed=True)
assert_almost_equal((h * diff(b)).sum(), 1, decimal=15)
# Weights
w = np.arange(10) + .5
h, b = histogram(a, range=[1, 9], weights=w, normed=True)
assert_equal((h * diff(b)).sum(), 1)
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
assert_equal(h, w[1:-1])
def test_type(self):
# Check the type of the returned histogram
a = np.arange(10) + .5
h, b = histogram(a)
assert_(np.issubdtype(h.dtype, int))
h, b = histogram(a, normed=True)
assert_(np.issubdtype(h.dtype, float))
h, b = histogram(a, weights=np.ones(10, int))
assert_(np.issubdtype(h.dtype, int))
h, b = histogram(a, weights=np.ones(10, float))
assert_(np.issubdtype(h.dtype, float))
def test_f32_rounding(self):
# gh-4799, check that the rounding of the edges works with float32
x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
assert_equal(counts_hist.sum(), 3.)
def test_weights(self):
v = rand(100)
w = np.ones(100) * 5
a, b = histogram(v)
na, nb = histogram(v, normed=True)
wa, wb = histogram(v, weights=w)
nwa, nwb = histogram(v, weights=w, normed=True)
assert_array_almost_equal(a * 5, wa)
assert_array_almost_equal(na, nwa)
# Check weights are properly applied.
v = np.linspace(0, 10, 10)
w = np.concatenate((np.zeros(5), np.ones(5)))
wa, wb = histogram(v, bins=np.arange(11), weights=w)
assert_array_almost_equal(wa, w)
# Check with integer weights
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
assert_array_equal(wa, [4, 5, 0, 1])
wa, wb = histogram(
[1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
# Check weights with non-uniform bin widths
a, b = histogram(
np.arange(9), [0, 1, 3, 6, 10],
weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
assert_almost_equal(a, [.2, .1, .1, .075])
def test_exotic_weights(self):
# Test the use of weights that are not integer or floats, but e.g.
# complex numbers or object types.
# Complex weights
values = np.array([1.3, 2.5, 2.3])
weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Decimal weights
from decimal import Decimal
values = np.array([1.3, 2.5, 2.3])
weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
def test_no_side_effects(self):
# This is a regression test that ensures that values passed to
# ``histogram`` are unchanged.
values = np.array([1.3, 2.5, 2.3])
np.histogram(values, range=[-10, 10], bins=100)
assert_array_almost_equal(values, [1.3, 2.5, 2.3])
def test_empty(self):
a, b = histogram([], bins=([0, 1]))
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
def test_error_binnum_type(self):
# Tests that the right error is raised if the bins argument is a float
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, 5)
assert_raises(TypeError, histogram, vals, 2.4)
def test_finite_range(self):
# Normal ranges should be fine
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, range=[0.25,0.75])
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
mask = hist > 0
left_edges = edges[:-1][mask]
right_edges = edges[1:][mask]
for x, left, right in zip(arr, left_edges, right_edges):
self.assertGreaterEqual(x, left)
self.assertLess(x, right)
def test_last_bin_inclusive_range(self):
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
self.assertEqual(hist[-1], 1)
class TestHistogramOptimBinNums(TestCase):
"""
Provide test coverage when using provided estimators for optimal number of
bins
"""
def test_empty(self):
estimator_list = ['fd', 'scott', 'rice', 'sturges',
'doane', 'sqrt', 'auto']
# check it can deal with empty data
for estimator in estimator_list:
a, b = histogram([], bins=estimator)
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
def test_simple(self):
"""
Straightforward testing with a mixture of linspace data (for
consistency). All test values have been precomputed and the values
shouldn't change
"""
# Some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
'doane': 8, 'sqrt': 8, 'auto': 7},
500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
'doane': 12, 'sqrt': 23, 'auto': 10},
5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
'doane': 17, 'sqrt': 71, 'auto': 17}}
for testlen, expectedResults in basic_test.items():
# Create some sort of non-uniform data to test with
# (2 peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
x = np.concatenate((x1, x2))
for estimator, numbins in expectedResults.items():
a, b = np.histogram(x, estimator)
assert_equal(len(a), numbins, err_msg="For the {0} estimator "
"with datasize of {1}".format(estimator, testlen))
def test_small(self):
"""
Smaller datasets have the potential to cause issues with the data
adaptive methods, especially the FD method. All bin numbers have been
precalculated.
"""
small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
'doane': 1, 'sqrt': 1},
2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
'doane': 1, 'sqrt': 2},
3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
'doane': 3, 'sqrt': 2}}
for testlen, expectedResults in small_dat.items():
testdat = np.arange(testlen)
for estimator, expbins in expectedResults.items():
a, b = np.histogram(testdat, estimator)
assert_equal(len(a), expbins, err_msg="For the {0} estimator "
"with datasize of {1}".format(estimator, testlen))
def test_incorrect_methods(self):
"""
Check that a ValueError is raised when an unknown string is passed in
"""
check_list = ['mad', 'freeman', 'histograms', 'IQR']
for estimator in check_list:
assert_raises(ValueError, histogram, [1, 2, 3], estimator)
def test_novariance(self):
"""
Check that methods handle no variance in data
Primarily for Scott and FD as the SD and IQR are both 0 in this case
"""
novar_dataset = np.ones(100)
novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
'doane': 1, 'sqrt': 1, 'auto': 1}
for estimator, numbins in novar_resultdict.items():
a, b = np.histogram(novar_dataset, estimator)
assert_equal(len(a), numbins, err_msg="{0} estimator, "
"No Variance test".format(estimator))
def test_outlier(self):
"""
Check the FD, Scott and Doane with outliers.
The FD estimator computes a smaller bin width since it's less affected by
outliers. Since the range is so (artificially) large, this means more
bins, most of which will be empty, but the data of interest usually is
unaffected. The Scott estimator is more affected and returns fewer bins,
despite most of the variance being in one area of the data. The Doane
estimator lies somewhere between the other two.
"""
xcenter = np.linspace(-10, 10, 50)
outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11}
for estimator, numbins in outlier_resultdict.items():
a, b = np.histogram(outlier_dataset, estimator)
assert_equal(len(a), numbins)
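# Hedged sketch of the Freedman-Diaconis rule that the 'fd' estimator is based
# on: bin width ~ 2*IQR*n**(-1/3) and bin count ~ ceil(range/width). NumPy's
# internal implementation may differ in detail; for the dataset above this
# back-of-the-envelope version also lands on roughly 21 bins. Names with a
# leading underscore are illustrative only.
import numpy as np
_data = np.hstack((np.linspace(-110, -100, 5), np.linspace(-10, 10, 50)))
_iqr = np.subtract(*np.percentile(_data, [75, 25]))
_width = 2.0 * _iqr / _data.size ** (1.0 / 3.0)
_nbins = int(np.ceil((_data.max() - _data.min()) / _width))  # ~= 21 here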
def test_simple_range(self):
"""
Straightforward testing with a mixture of linspace data (for
consistency). Adding in a 3rd mixture that will then be
completely ignored. All test values have been precomputed and
they shouldn't change.
"""
# some basic sanity checking, with some fixed data. Checking for the correct number of bins
basic_test = {50: {'fd': 8, 'scott': 8, 'rice': 15, 'sturges': 14, 'auto': 14},
500: {'fd': 15, 'scott': 16, 'rice': 32, 'sturges': 20, 'auto': 20},
5000: {'fd': 33, 'scott': 33, 'rice': 69, 'sturges': 27, 'auto': 33}}
for testlen, expectedResults in basic_test.items():
# create some sort of non-uniform data to test with (3-peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
x3 = np.linspace(-100, -50, testlen)
x = np.hstack((x1, x2, x3))
for estimator, numbins in expectedResults.items():
a, b = np.histogram(x, estimator, range = (-20, 20))
msg = "For the {0} estimator with datasize of {1}".format(estimator, testlen)
assert_equal(len(a), numbins, err_msg=msg)
def test_simple_weighted(self):
"""
Check that weighted data raises a TypeError
"""
estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
for estimator in estimator_list:
assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
class TestHistogramdd(TestCase):
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
[.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
H, edges = histogramdd(x, (2, 3, 3),
range=[[-1, 1], [0, 3], [0, 3]])
answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
[[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
assert_array_equal(H, answer)
# Check normalization
ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
H, edges = histogramdd(x, bins=ed, normed=True)
assert_(np.all(H == answer / 12.))
# Check that H has the correct shape.
H, edges = histogramdd(x, (2, 3, 4),
range=[[-1, 1], [0, 3], [0, 4]],
normed=True)
answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
assert_array_almost_equal(H, answer / 6., 4)
# Check that a sequence of arrays is accepted and H has the correct
# shape.
z = [np.squeeze(y) for y in split(x, 3, axis=1)]
H, edges = histogramdd(
z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
answer = np.array([[[0, 0], [0, 0], [0, 0]],
[[0, 1], [0, 0], [1, 0]],
[[0, 1], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]]])
assert_array_equal(H, answer)
Z = np.zeros((5, 5, 5))
Z[list(range(5)), list(range(5)), list(range(5))] = 1.
H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
assert_array_equal(H, Z)
def test_shape_3d(self):
# All possible permutations for bins of different lengths in 3D.
bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
(4, 5, 6))
r = rand(10, 3)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_shape_4d(self):
# All possible permutations for bins of different lengths in 4D.
bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
(5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
(7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
(4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
(6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
(5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
r = rand(10, 4)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_weights(self):
v = rand(100, 2)
hist, edges = histogramdd(v)
n_hist, edges = histogramdd(v, normed=True)
w_hist, edges = histogramdd(v, weights=np.ones(100))
assert_array_equal(w_hist, hist)
w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True)
assert_array_equal(w_hist, n_hist)
w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
assert_array_equal(w_hist, 2 * hist)
def test_identical_samples(self):
x = np.zeros((10, 2), int)
hist, edges = histogramdd(x, bins=2)
assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
def test_empty(self):
a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, np.array([[0.]]))
a, b = np.histogramdd([[], [], []], bins=2)
assert_array_max_ulp(a, np.zeros((2, 2, 2)))
def test_bins_errors(self):
# There are two ways to specify bins. Check for the right errors
# when mixing those.
x = np.arange(8).reshape(2, 4)
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
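# Illustrative sketch (not part of the test suite) of the two ways to specify
# bins mentioned above: a per-axis bin count versus explicit per-axis edge
# arrays. The sample points and bin counts are arbitrary.
import numpy as np
_pts = np.random.random((20, 2))
_h1, _ = np.histogramdd(_pts, bins=(4, 5))  # counts per axis
_h2, _ = np.histogramdd(_pts, bins=[np.linspace(0, 1, 5),
                                    np.linspace(0, 1, 6)])  # explicit edges
assert _h1.shape == _h2.shape == (4, 5)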
def test_inf_edges(self):
# Test that using +/-inf bin edges works. See #1788.
with np.errstate(invalid='ignore'):
x = np.arange(6).reshape(3, 2)
expected = np.array([[1, 0], [0, 1], [0, 1]])
h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
assert_allclose(h, expected)
def test_rightmost_binedge(self):
# Test an event very close to the rightmost bin edge. See GitHub issue #4266
x = [0.9999999995]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0000000001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
def test_finite_range(self):
vals = np.random.random((100, 3))
histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
class TestUnique(TestCase):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
class TestCheckFinite(TestCase):
def test_simple(self):
a = [1, 2, 3]
b = [1, 2, np.inf]
c = [1, 2, np.nan]
np.lib.asarray_chkfinite(a)
assert_raises(ValueError, np.lib.asarray_chkfinite, b)
assert_raises(ValueError, np.lib.asarray_chkfinite, c)
def test_dtype_order(self):
# Regression test for missing dtype and order arguments
a = [1, 2, 3]
a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64)
assert_(a.dtype == np.float64)
class TestCorrCoef(TestCase):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
[0.9297531, 0.32296769, 0.19267156]])
B = np.array(
[[0.10377691, 0.5417086, 0.49807457],
[0.82872117, 0.77801674, 0.39226705],
[0.9314666, 0.66800209, 0.03538394]])
res1 = np.array(
[[1., 0.9379533, -0.04931983],
[0.9379533, 1., 0.30007991],
[-0.04931983, 0.30007991, 1.]])
res2 = np.array(
[[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
[0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
[-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
[0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],
[0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
[0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
def test_non_array(self):
assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
[[1., -1.], [-1., 1.]])
def test_simple(self):
tgt1 = corrcoef(self.A)
assert_almost_equal(tgt1, self.res1)
assert_(np.all(np.abs(tgt1) <= 1.0))
tgt2 = corrcoef(self.A, self.B)
assert_almost_equal(tgt2, self.res2)
assert_(np.all(np.abs(tgt2) <= 1.0))
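# Sketch (illustrative only) of the relationship corrcoef relies on: the
# correlation matrix is the covariance matrix normalized by the outer product
# of the standard deviations (NumPy additionally clips round-off outside
# [-1, 1]). The underscore-prefixed data is random and arbitrary.
import numpy as np
_a = np.random.rand(3, 5)
_c = np.cov(_a)
_d = np.sqrt(np.diag(_c))
assert np.allclose(np.corrcoef(_a), _c / np.outer(_d, _d))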
def test_ddof(self):
# ddof raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
sup.filter(DeprecationWarning)
# ddof has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
def test_bias(self):
# bias raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
sup.filter(DeprecationWarning)
# bias has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
res = corrcoef(x)
tgt = np.array([[1., -1.j], [1.j, 1.]])
assert_allclose(res, tgt)
assert_(np.all(np.abs(res) <= 1.0))
def test_xy(self):
x = np.array([[1, 2, 3]])
y = np.array([[1j, 2j, 3j]])
assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))
def test_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(corrcoef(np.array([])), np.nan)
assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
np.array([]).reshape(0, 0))
assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]))
def test_extreme(self):
x = [[1e-100, 1e100], [1e100, 1e-100]]
with np.errstate(all='raise'):
c = corrcoef(x)
assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
assert_(np.all(np.abs(c) <= 1.0))
class TestCov(TestCase):
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
frequencies = np.array([1, 4, 1])
x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
unit_frequencies = np.ones(3, dtype=np.integer)
weights = np.array([1.0, 4.0, 1.0])
res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
unit_weights = np.ones(3)
x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
def test_basic(self):
assert_allclose(cov(self.x1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]]))
def test_xy(self):
x = np.array([[1, 2, 3]])
y = np.array([[1j, 2j, 3j]])
assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))
def test_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(cov(np.array([])), np.nan)
assert_array_equal(cov(np.array([]).reshape(0, 2)),
np.array([]).reshape(0, 0))
assert_array_equal(cov(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]))
def test_wrong_ddof(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(cov(self.x1, ddof=5),
np.array([[np.inf, -np.inf],
[-np.inf, np.inf]]))
def test_1D_rowvar(self):
assert_allclose(cov(self.x3), cov(self.x3, rowvar=0))
y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0))
def test_1D_variance(self):
assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
def test_fweights(self):
assert_allclose(cov(self.x2, fweights=self.frequencies),
cov(self.x2_repeats))
assert_allclose(cov(self.x1, fweights=self.frequencies),
self.res2)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies),
self.res1)
nonint = self.frequencies + 0.5
assert_raises(TypeError, cov, self.x1, fweights=nonint)
f = np.ones((2, 3), dtype=np.integer)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
f = np.ones(2, dtype=np.integer)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
f = -1 * np.ones(3, dtype=np.integer)
assert_raises(ValueError, cov, self.x1, fweights=f)
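# Sketch (illustrative only) of what integer fweights mean: each observation
# (column) is counted fweights[i] times, so cov with fweights matches cov of
# the explicitly repeated data. This mirrors the x2/x2_repeats fixtures above;
# the underscore-prefixed names are illustrative.
import numpy as np
_x = np.array([[0.0, 1.0, 2.0]])
_f = np.array([1, 4, 1])
assert np.allclose(np.cov(_x, fweights=_f), np.cov(np.repeat(_x, _f, axis=1)))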
def test_aweights(self):
assert_allclose(cov(self.x1, aweights=self.weights), self.res3)
assert_allclose(cov(self.x1, aweights=3.0 * self.weights),
cov(self.x1, aweights=self.weights))
assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)
w = np.ones((2, 3))
assert_raises(RuntimeError, cov, self.x1, aweights=w)
w = np.ones(2)
assert_raises(RuntimeError, cov, self.x1, aweights=w)
w = -1.0 * np.ones(3)
assert_raises(ValueError, cov, self.x1, aweights=w)
def test_unit_fweights_and_aweights(self):
assert_allclose(cov(self.x2, fweights=self.frequencies,
aweights=self.unit_weights),
cov(self.x2_repeats))
assert_allclose(cov(self.x1, fweights=self.frequencies,
aweights=self.unit_weights),
self.res2)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.unit_weights),
self.res1)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.weights),
self.res3)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=3.0 * self.weights),
cov(self.x1, aweights=self.weights))
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.unit_weights),
self.res1)
class Test_I0(TestCase):
def test_simple(self):
assert_almost_equal(
i0(0.5),
np.array(1.0634833707413234))
A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
assert_almost_equal(
i0(A),
np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049]))
B = np.array([[0.827002, 0.99959078],
[0.89694769, 0.39298162],
[0.37954418, 0.05206293],
[0.36465447, 0.72446427],
[0.48164949, 0.50324519]])
assert_almost_equal(
i0(B),
np.array([[1.17843223, 1.26583466],
[1.21147086, 1.03898290],
[1.03633899, 1.00067775],
[1.03352052, 1.13557954],
[1.05884290, 1.06432317]]))
class TestKaiser(TestCase):
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
assert_almost_equal(kaiser(0, 1.0),
np.array([]))
assert_almost_equal(kaiser(2, 1.0),
np.array([0.78984831, 0.78984831]))
assert_almost_equal(kaiser(5, 1.0),
np.array([0.78984831, 0.94503323, 1.,
0.94503323, 0.78984831]))
assert_almost_equal(kaiser(5, 1.56789),
np.array([0.58285404, 0.88409679, 1.,
0.88409679, 0.58285404]))
def test_int_beta(self):
kaiser(3, 4)
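# Hedged sketch of the textbook Kaiser window definition the values above come
# from: w[n] = I0(beta*sqrt(1 - ((n - alpha)/alpha)**2)) / I0(beta), with
# alpha = (M - 1)/2. This assumes np.kaiser follows the standard form; the
# underscore-prefixed names are illustrative only.
import numpy as np
_M, _beta = 5, 1.56789
_n = np.arange(_M)
_alpha = (_M - 1) / 2.0
_manual = np.i0(_beta * np.sqrt(1 - ((_n - _alpha) / _alpha) ** 2)) / np.i0(_beta)
assert np.allclose(np.kaiser(_M, _beta), _manual)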
class TestMsort(TestCase):
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
[0.36844147, 0.37325583, 0.96098397],
[0.64864341, 0.52929049, 0.39172155]])
assert_almost_equal(
msort(A),
np.array([[0.36844147, 0.37325583, 0.39172155],
[0.44567325, 0.52929049, 0.54900530],
[0.64864341, 0.79115165, 0.96098397]]))
class TestMeshgrid(TestCase):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
assert_array_equal(X, np.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]))
assert_array_equal(Y, np.array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6],
[7, 7, 7]]))
def test_single_input(self):
[X] = meshgrid([1, 2, 3, 4])
assert_array_equal(X, np.array([1, 2, 3, 4]))
def test_no_input(self):
args = []
assert_array_equal([], meshgrid(*args))
def test_indexing(self):
x = [1, 2, 3]
y = [4, 5, 6, 7]
[X, Y] = meshgrid(x, y, indexing='ij')
assert_array_equal(X, np.array([[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]]))
assert_array_equal(Y, np.array([[4, 5, 6, 7],
[4, 5, 6, 7],
[4, 5, 6, 7]]))
# Test expected shapes:
z = [8, 9]
assert_(meshgrid(x, y)[0].shape == (4, 3))
assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))
assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')
def test_sparse(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
assert_array_equal(X, np.array([[1, 2, 3]]))
assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
def test_invalid_arguments(self):
# Test that meshgrid complains about invalid arguments
# Regression test for issue #4755:
# https://github.com/numpy/numpy/issues/4755
assert_raises(TypeError, meshgrid,
[1, 2, 3], [4, 5, 6, 7], indices='ij')
class TestPiecewise(TestCase):
def test_simple(self):
# Condition is single bool list
x = piecewise([0, 0], [True, False], [1])
assert_array_equal(x, [1, 0])
# List of conditions: single bool list
x = piecewise([0, 0], [[True, False]], [1])
assert_array_equal(x, [1, 0])
# Condition is single bool array
x = piecewise([0, 0], np.array([True, False]), [1])
assert_array_equal(x, [1, 0])
# Condition is single int array
x = piecewise([0, 0], np.array([1, 0]), [1])
assert_array_equal(x, [1, 0])
# List of conditions: int array
x = piecewise([0, 0], [np.array([1, 0])], [1])
assert_array_equal(x, [1, 0])
x = piecewise([0, 0], [[False, True]], [lambda x:-1])
assert_array_equal(x, [0, -1])
def test_two_conditions(self):
x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
assert_array_equal(x, [3, 4])
def test_scalar_domains_three_conditions(self):
x = piecewise(3, [True, False, False], [4, 2, 0])
assert_equal(x, 4)
def test_default(self):
# No value specified for x[1], should be 0
x = piecewise([1, 2], [True, False], [2])
assert_array_equal(x, [2, 0])
# Should set x[1] to 3
x = piecewise([1, 2], [True, False], [2, 3])
assert_array_equal(x, [2, 3])
def test_0d(self):
x = np.array(3)
y = piecewise(x, x > 3, [4, 0])
assert_(y.ndim == 0)
assert_(y == 0)
x = 5
y = piecewise(x, [[True], [False]], [1, 0])
assert_(y.ndim == 0)
assert_(y == 1)
# With 3 ranges (this used to fail)
y = piecewise(x, [False, False, True], [1, 2, 3])
assert_array_equal(y, 3)
def test_0d_comparison(self):
x = 3
y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
assert_equal(y, 4)
# With 3 ranges (this used to fail)
x = 4
y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
assert_array_equal(y, 2)
def test_multidimensional_extrafunc(self):
x = np.array([[-2.5, -1.5, -0.5],
[0.5, 1.5, 2.5]])
y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
assert_array_equal(y, np.array([[-1., -1., -1.],
[3., 3., 1.]]))
class TestBincount(TestCase):
def test_simple(self):
y = np.bincount(np.arange(4))
assert_array_equal(y, np.ones(4))
def test_simple2(self):
y = np.bincount(np.array([1, 5, 2, 4, 1]))
assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
def test_simple_weight(self):
x = np.arange(4)
w = np.array([0.2, 0.3, 0.5, 0.1])
y = np.bincount(x, w)
assert_array_equal(y, w)
def test_simple_weight2(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
def test_with_minlength(self):
x = np.array([0, 1, 0, 1, 1])
y = np.bincount(x, minlength=3)
assert_array_equal(y, np.array([2, 3, 0]))
def test_with_minlength_smaller_than_maxvalue(self):
x = np.array([0, 1, 1, 2, 2, 3, 3])
y = np.bincount(x, minlength=2)
assert_array_equal(y, np.array([1, 2, 2, 2]))
def test_with_minlength_and_weights(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w, 8)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
def test_empty(self):
x = np.array([], dtype=int)
y = np.bincount(x)
assert_array_equal(x, y)
def test_empty_with_minlength(self):
x = np.array([], dtype=int)
y = np.bincount(x, minlength=5)
assert_array_equal(y, np.zeros(5, dtype=int))
def test_with_incorrect_minlength(self):
x = np.array([], dtype=int)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"must be positive",
lambda: np.bincount(x, minlength=-1))
assert_raises_regex(ValueError,
"must be positive",
lambda: np.bincount(x, minlength=0))
x = np.arange(5)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"minlength must be positive",
lambda: np.bincount(x, minlength=-1))
assert_raises_regex(ValueError,
"minlength must be positive",
lambda: np.bincount(x, minlength=0))
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
double_refcount = sys.getrefcount(np.dtype(np.double))
for j in range(10):
np.bincount([1, 2, 3])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
for j in range(10):
np.bincount([1, 2, 3], [4, 5, 6])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
class TestInterp(TestCase):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
assert_raises(ValueError, interp, 0, [0], [1, 2])
assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
assert_raises(ValueError, interp, 0, [], [], period=360)
assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
def test_basic(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.linspace(0, 1, 50)
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
# Needs range of sizes to test different code paths.
# size ==1 is special cased, 1 < size < 5 is linear search, and
# size >= 5 goes through local search and possibly binary search.
for size in range(1, 10):
xp = np.arange(size, dtype=np.double)
yp = np.ones(size, dtype=np.double)
incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
decpts = incpts[::-1]
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
inctgt = np.array([1, 1, 1, 1], dtype=np.float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
inctgt = np.array([0, 1, 1, 1], dtype=np.float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
inctgt = np.array([1, 1, 1, 2], dtype=np.float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
inctgt = np.array([0, 1, 1, 2], dtype=np.float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = 0
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = .3
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float32(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float64(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
x0 = 0.3
y0 = x0 + (1+x0)*1.0j
assert_almost_equal(np.interp(x0, x, y), y0)
# test complex left and right
x0 = -1
left = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, left=left), left)
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,
3.5+3.5j, 3.75+3.75j]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def test_zero_dimensional_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.array(.3, dtype=object)
assert_almost_equal(np.interp(x0, x, y), .3)
def test_if_len_x_is_small(self):
xp = np.arange(0, 10, 0.0001)
fp = np.sin(xp)
assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
def test_period(self):
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5, 10, 3, 4]
y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
x = np.array(x, order='F').reshape(2, -1)
y = np.array(y, order='C').reshape(2, -1)
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def compare_results(res, desired):
for i in range(len(desired)):
assert_array_equal(res[i], desired[i])
class TestPercentile(TestCase):
def test_basic(self):
x = np.arange(8) * 0.5
assert_equal(np.percentile(x, 0), 0.)
assert_equal(np.percentile(x, 100), 3.5)
assert_equal(np.percentile(x, 50), 1.75)
x[1] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(x, 0), np.nan)
assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
assert_(w[0].category is RuntimeWarning)
def test_api(self):
d = np.ones(5)
np.percentile(d, 5, None, None, False)
np.percentile(d, 5, None, None, False, 'linear')
o = np.ones((1,))
np.percentile(d, 5, None, o, False, 'linear')
def test_2D(self):
x = np.array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
def test_linear(self):
# Test defaults
assert_equal(np.percentile(range(10), 50), 4.5)
# explicitly specify interpolation_method 'linear' (the default)
assert_equal(np.percentile(range(10), 50,
interpolation='linear'), 4.5)
def test_lower_higher(self):
# interpolation_method 'lower'/'higher'
assert_equal(np.percentile(range(10), 50,
interpolation='lower'), 4)
assert_equal(np.percentile(range(10), 50,
interpolation='higher'), 5)
def test_midpoint(self):
assert_equal(np.percentile(range(10), 51,
interpolation='midpoint'), 4.5)
assert_equal(np.percentile(range(11), 51,
interpolation='midpoint'), 5.5)
assert_equal(np.percentile(range(11), 50,
interpolation='midpoint'), 5)
def test_nearest(self):
assert_equal(np.percentile(range(10), 51,
interpolation='nearest'), 5)
assert_equal(np.percentile(range(10), 49,
interpolation='nearest'), 4)
def test_sequence(self):
x = np.arange(8) * 0.5
assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])
def test_axis(self):
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)
# ensure qth axis is always first as with np.array(old_percentile(..))
x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
assert_equal(np.percentile(x, (25, 50)).shape, (2,))
assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
assert_equal(
np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50),
interpolation="higher").shape, (2,))
assert_equal(np.percentile(x, (25, 50, 75),
interpolation="higher").shape, (3,))
assert_equal(np.percentile(x, (25, 50), axis=0,
interpolation="higher").shape, (2, 4, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=1,
interpolation="higher").shape, (2, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=2,
interpolation="higher").shape, (2, 3, 4, 6))
assert_equal(np.percentile(x, (25, 50), axis=3,
interpolation="higher").shape, (2, 3, 4, 5))
assert_equal(np.percentile(x, (25, 50, 75), axis=1,
interpolation="higher").shape, (3, 3, 5, 6))
def test_scalar_q(self):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50), 5.5)
self.assertTrue(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
assert_equal(np.percentile(x, 50, axis=0), r0)
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
r1 = np.array([1.5, 5.5, 9.5])
assert_almost_equal(np.percentile(x, 50, axis=1), r1)
assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)
out = np.empty(1)
assert_equal(np.percentile(x, 50, out=out), 5.5)
assert_equal(out, 5.5)
out = np.empty(4)
assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
assert_equal(out, r0)
out = np.empty(3)
assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
assert_equal(out, r1)
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
self.assertTrue(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
c0 = np.percentile(x, 50, interpolation='lower', axis=0)
assert_equal(c0, r0)
assert_equal(c0.shape, r0.shape)
r1 = np.array([1., 5., 9.])
c1 = np.percentile(x, 50, interpolation='lower', axis=1)
assert_almost_equal(c1, r1)
assert_equal(c1.shape, r1.shape)
out = np.empty((), dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', out=out)
assert_equal(c, 5)
assert_equal(out, 5)
out = np.empty(4, dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', axis=0, out=out)
assert_equal(c, r0)
assert_equal(out, r0)
out = np.empty(3, dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', axis=1, out=out)
assert_equal(c, r1)
assert_equal(out, r1)
def test_exception(self):
assert_raises(ValueError, np.percentile, [1, 2], 56,
interpolation='foobar')
assert_raises(ValueError, np.percentile, [1], 101)
assert_raises(ValueError, np.percentile, [1], -1)
assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])
def test_percentile_list(self):
assert_equal(np.percentile([1, 2, 3], 0), 1)
def test_percentile_out(self):
x = np.array([1, 2, 3])
y = np.zeros((3,))
p = (1, 2, 3)
np.percentile(x, p, out=y)
assert_equal(y, np.percentile(x, p))
x = np.array([[1, 2, 3],
[4, 5, 6]])
y = np.zeros((3, 3))
np.percentile(x, p, axis=0, out=y)
assert_equal(y, np.percentile(x, p, axis=0))
y = np.zeros((3, 2))
np.percentile(x, p, axis=1, out=y)
assert_equal(y, np.percentile(x, p, axis=1))
x = np.arange(12).reshape(3, 4)
# q.dim > 1, float
r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
out = np.empty((2, 4))
assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)
assert_equal(out, r0)
r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
out = np.empty((2, 3))
assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
assert_equal(out, r1)
# q.dim > 1, int
r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
out = np.empty((2, 4), dtype=x.dtype)
c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out)
assert_equal(c, r0)
assert_equal(out, r0)
r1 = np.array([[0, 4, 8], [1, 5, 9]])
out = np.empty((2, 3), dtype=x.dtype)
c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out)
assert_equal(c, r1)
assert_equal(out, r1)
def test_percentile_empty_dim(self):
# empty dims are preserved
d = np.arange(11 * 2).reshape(11, 1, 2, 1)
assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=2,
interpolation='midpoint').shape,
(11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=-2,
interpolation='midpoint').shape,
(11, 1, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,
(2, 1, 2, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,
(2, 11, 2, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,
(2, 11, 1, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,
(2, 11, 1, 2))
def test_percentile_no_overwrite(self):
a = np.array([2, 3, 4, 1])
np.percentile(a, [50], overwrite_input=False)
assert_equal(a, np.array([2, 3, 4, 1]))
a = np.array([2, 3, 4, 1])
np.percentile(a, [50])
assert_equal(a, np.array([2, 3, 4, 1]))
def test_no_p_overwrite(self):
p = np.linspace(0., 100., num=5)
np.percentile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, np.linspace(0., 100., num=5))
p = np.linspace(0., 100., num=5).tolist()
np.percentile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, np.linspace(0., 100., num=5).tolist())
def test_percentile_overwrite(self):
a = np.array([2, 3, 4, 1])
b = np.percentile(a, [50], overwrite_input=True)
assert_equal(b, np.array([2.5]))
b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
assert_equal(b, np.array([2.5]))
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
x = np.rollaxis(x, -1, 0)
assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
np.percentile(x, [25, 60], axis=None))
assert_equal(np.percentile(x, [25, 60], axis=(0,)),
np.percentile(x, [25, 60], axis=0))
d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
np.random.shuffle(d.ravel())
assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
np.percentile(d[:,:,:, 0].flatten(), 25))
assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
np.percentile(d[:,:, 1,:].flatten(), [10, 90]))
assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
np.percentile(d[:,:, 2,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
np.percentile(d[2,:,:,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
np.percentile(d[2, 1,:,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
np.percentile(d[2,:,:, 1].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
np.percentile(d[2,:, 2,:].flatten(), 25))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.percentile, d, axis=-5, q=25)
assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25)
assert_raises(IndexError, np.percentile, d, axis=4, q=25)
assert_raises(IndexError, np.percentile, d, axis=(0, 4), q=25)
assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
keepdims=True).shape, (2, 1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
keepdims=True).shape, (2, 1, 5, 7, 1))
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
assert_equal(np.percentile(d, 0, 0, out=o), o)
assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
o = np.zeros((3,))
assert_equal(np.percentile(d, 1, 1, out=o), o)
assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
o = np.zeros(())
assert_equal(np.percentile(d, 2, out=o), o)
assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o)
def test_out_nan(self):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', RuntimeWarning)
o = np.zeros((4,))
d = np.ones((3, 4))
d[2, 1] = np.nan
assert_equal(np.percentile(d, 0, 0, out=o), o)
assert_equal(
np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
o = np.zeros((3,))
assert_equal(np.percentile(d, 1, 1, out=o), o)
assert_equal(
np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
o = np.zeros(())
assert_equal(np.percentile(d, 1, out=o), o)
assert_equal(
np.percentile(d, 1, interpolation='nearest', out=o), o)
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, 0.3), np.nan)
assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
np.array([np.nan] * 2))
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, 0.3), np.nan)
assert_equal(np.percentile(a, 0.3).ndim, 0)
assert_(w[0].category is RuntimeWarning)
# axis0 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, 0.3, 0), b)
# axis0 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], 0)
b[:, 2, 3] = np.nan
b[:, 1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
# axis1 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, 0.3, 1), b)
# axis1 not zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
b[:, 1, 3] = np.nan
b[:, 1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
# axis02 zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
b[1] = np.nan
b[2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, 0.3, (0, 2)), b)
# axis02 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2))
b[:, 1] = np.nan
b[:, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
# axis02 not zerod with nearest interpolation
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2), interpolation='nearest')
b[:, 1] = np.nan
b[:, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.percentile(
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
class TestMedian(TestCase):
def test_basic(self):
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_equal(np.median(a0), 1)
assert_allclose(np.median(a1), 0.5)
assert_allclose(np.median(a2), 2.5)
assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
assert_equal(np.median(a2, axis=1), [1, 4])
assert_allclose(np.median(a2, axis=None), 2.5)
a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
a = np.array([0.0463301, 0.0444502, 0.141249])
assert_equal(a[0], np.median(a))
a = np.array([0.0444502, 0.141249, 0.0463301])
assert_equal(a[-1], np.median(a))
# check array scalar result
assert_equal(np.median(a).ndim, 0)
a[1] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a).ndim, 0)
assert_(w[0].category is RuntimeWarning)
def test_axis_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
orig = a.copy()
np.median(a, axis=None)
for ax in range(a.ndim):
np.median(a, axis=ax)
assert_array_equal(a, orig)
assert_allclose(np.median(a3, axis=0), [3, 4])
assert_allclose(np.median(a3.T, axis=1), [3, 4])
assert_allclose(np.median(a3), 3.5)
assert_allclose(np.median(a3, axis=None), 3.5)
assert_allclose(np.median(a3.T), 3.5)
def test_overwrite_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),
[1.5, 2.5, 3.5])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
assert_allclose(
np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),
[3, 4])
a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
np.random.shuffle(a4.ravel())
assert_allclose(np.median(a4, axis=None),
np.median(a4.copy(), axis=None, overwrite_input=True))
assert_allclose(np.median(a4, axis=0),
np.median(a4.copy(), axis=0, overwrite_input=True))
assert_allclose(np.median(a4, axis=1),
np.median(a4.copy(), axis=1, overwrite_input=True))
assert_allclose(np.median(a4, axis=2),
np.median(a4.copy(), axis=2, overwrite_input=True))
def test_array_like(self):
x = [1, 2, 3]
assert_almost_equal(np.median(x), 2)
x2 = [x]
assert_almost_equal(np.median(x2), 2)
assert_allclose(np.median(x2, axis=0), x)
def test_subclass(self):
# gh-3846
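# np.median of this odd-length array reduces to a mean over the middle element,
# so the subclass' overridden mean() must be dispatched to (hence the -7 below).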
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def mean(self, axis=None, dtype=None, out=None):
return -7
a = MySubClass([1, 2, 3])
assert_equal(np.median(a), -7)
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_out_nan(self):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', RuntimeWarning)
o = np.zeros((4,))
d = np.ones((3, 4))
d[2, 1] = np.nan
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a, axis=0), np.nan)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a).ndim, 0)
assert_(w[0].category is RuntimeWarning)
# axis0
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, 0), b)
assert_equal(len(w), 1)
# axis1
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, 1), b)
assert_equal(len(w), 1)
# axis02
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
b[1] = np.nan
b[2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, (0, 2)), b)
assert_equal(len(w), 1)
def test_empty(self):
# empty arrays
a = np.array([], dtype=float)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
# multiple dimensions
a = np.array([], dtype=float, ndmin=3)
# no axis
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
# axis 0 and 1
b = np.array([], dtype=float, ndmin=2)
assert_equal(np.median(a, axis=0), b)
assert_equal(np.median(a, axis=1), b)
# axis 2
b = np.array(np.nan, dtype=float, ndmin=2)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning)
def test_object(self):
o = np.arange(7.)
assert_(type(np.median(o.astype(object))) is float)
o[2] = np.nan
assert_(type(np.median(o.astype(object))) is float)
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
x = np.rollaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
np.random.shuffle(d.ravel())
assert_equal(np.median(d, axis=(0, 1, 2))[0],
np.median(d[:,:,:, 0].flatten()))
assert_equal(np.median(d, axis=(0, 1, 3))[1],
np.median(d[:,:, 1,:].flatten()))
assert_equal(np.median(d, axis=(3, 1, -4))[2],
np.median(d[:,:, 2,:].flatten()))
assert_equal(np.median(d, axis=(3, 1, 2))[2],
np.median(d[2,:,:,:].flatten()))
assert_equal(np.median(d, axis=(3, 2))[2, 1],
np.median(d[2, 1,:,:].flatten()))
assert_equal(np.median(d, axis=(1, -2))[2, 1],
np.median(d[2,:,:, 1].flatten()))
assert_equal(np.median(d, axis=(1, 3))[2, 2],
np.median(d[2,:, 2,:].flatten()))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.median, d, axis=-5)
assert_raises(IndexError, np.median, d, axis=(0, -5))
assert_raises(IndexError, np.median, d, axis=4)
assert_raises(IndexError, np.median, d, axis=(0, 4))
assert_raises(ValueError, np.median, d, axis=(1, 1))
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
class TestAdd_newdoc_ufunc(TestCase):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")
def test_string_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
class TestAdd_newdoc(TestCase):
@dec.skipif(sys.flags.optimize == 2)
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300)
self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
h2oai/h2o-3 | h2o-bindings/bin/custom/python/gen_svd.py | 2 | 6425 | rest_api_version = 99
supervised_learning = False
def class_extensions():
def init_for_pipeline(self):
"""
Returns an H2OSVD object which implements the fit and transform methods so it can be used in a sklearn Pipeline properly.
All parameters defined in self._parms should be input parameters in the H2OSVD.__init__ method.
:returns: H2OSVD object
:examples:
>>> from h2o.transforms.preprocessing import H2OScaler
>>> from h2o.estimators import H2ORandomForestEstimator
>>> from h2o.estimators import H2OSingularValueDecompositionEstimator
>>> from sklearn.pipeline import Pipeline
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> pipe = Pipeline([("standardize", H2OScaler()),
... ("svd", H2OSingularValueDecompositionEstimator(nv=3).init_for_pipeline()),
... ("rf", H2ORandomForestEstimator(seed=42,ntrees=50))])
>>> pipe.fit(arrests[1:], arrests[0])
"""
import inspect
from h2o.transforms.decomposition import H2OSVD
# check which parameters can be passed to H2OSVD init
var_names = list(dict(inspect.getmembers(H2OSVD.__init__.__code__))['co_varnames'])
parameters = {k: v for k, v in self._parms.items() if k in var_names}
return H2OSVD(**parameters)
extensions = dict(
__class__=class_extensions,
)
examples = dict(
nv="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
transform="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
max_iterations="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_iterations=2000)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
export_checkpoints_dir="""
>>> import tempfile
>>> from os import listdir
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> checkpoints_dir = tempfile.mkdtemp()
>>> fit_h2o = H2OSingularValueDecompositionEstimator(export_checkpoints_dir=checkpoints_dir,
... seed=-5)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> len(listdir(checkpoints_dir))
""",
ignore_const_cols="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(ignore_const_cols=False,
... nv=4)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
keep_u="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(keep_u=False)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
max_runtime_secs="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... transform="standardize",
... max_runtime_secs=25)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
score_each_iteration="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4,
... score_each_iteration=True)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
seed="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(nv=4, seed=-3)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
svd_method="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(svd_method="power")
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
training_frame="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator()
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
u_name="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(u_name="fit_h2o")
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o.u_name
>>> fit_h2o
""",
use_all_factor_levels="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> fit_h2o = H2OSingularValueDecompositionEstimator(use_all_factor_levels=False)
>>> fit_h2o.train(x=list(range(4)), training_frame=arrests)
>>> fit_h2o
""",
validation_frame="""
>>> arrests = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/pca_test/USArrests.csv")
>>> train, valid = arrests.split_frame(ratios=[.8])
>>> fit_h2o = H2OSingularValueDecompositionEstimator()
>>> fit_h2o.train(x=list(range(4)),
... training_frame=train,
... validation_frame=valid)
>>> fit_h2o
"""
)
| apache-2.0 |
loli/semisupervisedforests | sklearn/mixture/tests/test_gmm.py | 3 | 13417 | import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
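# With a diagonal covariance the log-density factorizes over features:
#   log N(x | m, diag(cv)) = sum_d [-0.5 * log(2*pi*cv_d) - 0.5 * (x_d - m_d)**2 / cv_d]
# so each column of `ref` is just a sum of univariate normal log-pdfs.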
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
"""
Compare a slow, naive implementation of the diagonal-covariance log-density
with the vectorized version (mixture.log_multivariate_normal_density) to
check for correctness.
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices.
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
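# (Exact monotonicity of the EM log-likelihood only holds in exact arithmetic and
# without the min_covar floor applied to the covariance updates.)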
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
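# Expected totals for n_components=2, n_dim=5: means 2*5=10, weights 2-1=1, plus
# covariance terms (spherical: 2, diag: 2*5=10, tied: 5*6/2=15, full: 2*15=30).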
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
"""
Test all of the covariance_types return the same BIC score for
1-dimensional, 1 component fits.
"""
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
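# The reference values below follow AIC = -2*logL + 2*p and BIC = -2*logL + p*log(n),
# approximating -logL for (roughly) standard-normal data by n_samples * n_dim * SGH;
# the comparison is only required to hold up to the Monte-Carlo bound computed below.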
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
sxjscience/tvm | python/tvm/relay/frontend/__init__.py | 2 | 1369 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Frontends for constructing Relay programs.
Contains the model importers currently defined
for Relay.
"""
from __future__ import absolute_import
from .mxnet import from_mxnet
from .mxnet_qnn_op_utils import quantize_conv_bias_mkldnn_from_var
from .keras import from_keras
from .onnx import from_onnx
from .tflite import from_tflite
from .coreml import from_coreml
from .caffe2 import from_caffe2
from .tensorflow import from_tensorflow
from .darknet import from_darknet
from .pytorch import from_pytorch
from .caffe import from_caffe
from .change_datatype import ChangeDatatype
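# A typical conversion flow (illustrative sketch; the model file and input shape
# below are placeholders, not part of this module):
#   import onnx
#   from tvm import relay
#   onnx_model = onnx.load("model.onnx")
#   mod, params = relay.frontend.from_onnx(onnx_model,
#                                           shape={"input": (1, 3, 224, 224)})
# The other importers above follow the same pattern and return an IRModule
# together with a dict of converted parameters.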
| apache-2.0 |
loli/semisupervisedforests | sklearn/cluster/setup.py | 31 | 1248 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
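# Standalone build of just these extensions (sketch; the top-level scikit-learn
# setup.py normally aggregates this configuration()):
#   python setup.py build_ext --inplace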
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
loli/semisupervisedforests | examples/text/document_clustering.py | 31 | 8036 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a Python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are only based
on cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
lsa = make_pipeline(svd, Normalizer(copy=False))
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not (opts.n_components or opts.use_hashing):
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
MostafaGazar/tensorflow | tensorflow/contrib/learn/python/learn/estimators/base.py | 7 | 19731 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import types
import numpy as np
import six
from six import string_types
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io.data_feeder import setup_train_data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
def _write_with_backup(filename, content):
if gfile.Exists(filename):
gfile.Rename(filename, filename + '.old', overwrite=True)
with gfile.Open(filename, 'w') as f:
f.write(content)
def _copy_dir(dir_in, dir_out):
gfile.MakeDirs(dir_out)
for name in gfile.ListDirectory(dir_in):
name_in = os.path.join(dir_in, name)
name_out = os.path.join(dir_out, name)
if gfile.IsDirectory(name_in):
gfile.MakeDirs(name_out)
_copy_dir(name_in, name_out)
else:
gfile.Copy(name_in, name_out, overwrite=True)
class DeprecatedMixin(object):
"""This is mixin for deprecated TensorFlowYYY classes."""
def __init__(self, *args, **kwargs):
this_class = type(self).__name__
alternative_class = this_class[len('TensorFlow'):]
logging.warning(
'%s class is deprecated. Please consider using %s as an alternative.',
this_class, alternative_class)
# Handle deprecated arguments.
self.__deprecated_n_classes = kwargs.get('n_classes', 0)
if self.__deprecated_n_classes < 1 and 'n_classes' in kwargs:
kwargs.pop('n_classes')
self.batch_size = kwargs.pop('batch_size', 32)
self.steps = kwargs.pop('steps', 200)
if 'optimizer' in kwargs or 'learning_rate' in kwargs:
self.learning_rate = kwargs.pop('learning_rate', 0.1)
self.optimizer = kwargs.pop('optimizer', 'Adagrad')
if isinstance(self.learning_rate, types.FunctionType):
        raise ValueError('Function-like learning_rate is not supported; '
                         'consider using a custom Estimator.')
else:
learning_rate = self.learning_rate
if isinstance(self.optimizer, types.FunctionType):
optimizer = self.optimizer(learning_rate)
elif isinstance(self.optimizer, six.string_types):
optimizer = layers.OPTIMIZER_CLS_NAMES[self.optimizer](learning_rate)
else:
optimizer = self.optimizer
kwargs['optimizer'] = optimizer
if 'class_weight' in kwargs:
raise ValueError('Sorry we switched interface for providing class '
'weights. Please use weight column instead which '
'provides more granular control (per example).')
if 'clip_gradients' in kwargs:
logging.warning('clip_gradients argument in %s is now converted to '
'gradient_clip_norm.' % this_class)
kwargs['gradient_clip_norm'] = kwargs.pop('clip_gradients')
else:
kwargs['gradient_clip_norm'] = 5.0
if 'continue_training' in kwargs:
logging.warning('continue_training argument in %s is now ignored.' %
this_class)
kwargs.pop('continue_training')
if 'verbose' in kwargs:
logging.warning('verbose argument in %s is now ignored.' %
this_class)
kwargs.pop('verbose')
super(DeprecatedMixin, self).__init__(*args, **kwargs)
def fit(self, x, y, steps=None, batch_size=None, monitors=None, logdir=None):
if logdir is not None:
self._model_dir = logdir
return super(DeprecatedMixin, self).fit(
x=x, y=y, steps=steps or self.steps,
batch_size=batch_size or self.batch_size, monitors=monitors)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
axis=1):
"""Predict class or regression for `x`."""
if x is not None:
predict_data_feeder = setup_train_data_feeder(
x, None, n_classes=None,
batch_size=batch_size or self.batch_size,
shuffle=False, epochs=1)
result_iter = super(DeprecatedMixin, self)._infer_model(
input_fn=predict_data_feeder.input_builder,
feed_fn=predict_data_feeder.get_feed_dict_fn(),
outputs=outputs, as_iterable=True)
else:
result_iter = super(DeprecatedMixin, self)._infer_model(
input_fn=input_fn, outputs=outputs, as_iterable=True)
result = np.array(list(result_iter))
if self.__deprecated_n_classes > 1 and axis is not None:
return np.argmax(result, axis)
return result
def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None):
return self.predict(x=x, input_fn=input_fn, batch_size=batch_size,
outputs=outputs, axis=None)
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
# Copy model dir into new path.
_copy_dir(self.model_dir, path)
class TensorFlowEstimator(estimator.Estimator, DeprecatedMixin):
"""Base class for all TensorFlow estimators."""
def __init__(self,
model_fn,
n_classes,
batch_size=32,
steps=200,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
class_weight=None,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowEstimator instance.
Args:
model_fn: Model function, that takes input `x`, `y` tensors and outputs
prediction and loss tensors.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
      learning_rate: If this is a constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
                learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
````
clip_gradients: Clip norm of the gradients to this value to stop
gradient explosion.
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are supposed to
have weight one.
      continue_training: when continue_training is True, the model, once
        initialized, will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
* 0: the algorithm and debug information is muted.
* 1: trainer prints the progress.
* 2: log device placement is printed.
"""
self.class_weight = class_weight
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
if isinstance(optimizer, six.string_types):
if optimizer not in layers.OPTIMIZER_CLS_NAMES:
raise ValueError(
'Optimizer name should be one of [%s], you provided %s.' %
(', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))
self.optimizer = optimizer
super(TensorFlowEstimator, self).__init__(
model_fn=self._get_model_fn(model_fn),
config=config)
self.n_classes = n_classes
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.continue_training = continue_training
self._data_feeder = None
def fit(self, x, y, steps=None, monitors=None, logdir=None):
"""Neural network model from provided `model_fn` and training data.
    Note: the first call constructs the graph and initializes the variables.
    Subsequent calls will continue training the same model.
    This logic follows the partial_fit() interface in scikit-learn.
    To restart learning, create a new estimator.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
steps: int, number of steps to train.
If None or 0, train for `self.steps`.
monitors: List of `BaseMonitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = setup_train_data_feeder(
x, y, n_classes=self.n_classes, batch_size=self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors)
return self
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None):
"""Evaluates given model with provided evaluation data.
See superclass Estimator for more details.
Args:
x: features.
y: targets.
input_fn: Input function.
feed_fn: Function creating a feed dict every time it is called.
batch_size: minibatch size to use on the input.
steps: Number of steps for which to evaluate model.
metrics: Dict of metric ops to run. If None, the default metrics are used.
name: Name of the evaluation.
Returns:
Returns `dict` with evaluation results.
"""
feed_fn = None
if x is not None:
eval_data_feeder = setup_train_data_feeder(
x, y, n_classes=self.n_classes, batch_size=self.batch_size, epochs=1)
input_fn, feed_fn = (eval_data_feeder.input_builder,
eval_data_feeder.get_feed_dict_fn())
return self._evaluate_model(
input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps,
name=name)
def partial_fit(self, x, y):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(x, y)
def _predict(self, x, axis=-1, batch_size=None):
if self._graph is None:
raise NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
predict_data_feeder = setup_train_data_feeder(
x, None, n_classes=None,
batch_size=batch_size,
shuffle=False, epochs=1)
preds = np.array(list(self._infer_model(
input_fn=predict_data_feeder.input_builder,
feed_fn=predict_data_feeder.get_feed_dict_fn(),
as_iterable=True)))
if self.n_classes > 1 and axis != -1:
preds = preds.argmax(axis=axis)
return preds
def predict(self, x, axis=1, batch_size=None):
"""Predict class or regression for `x`.
For a classification model, the predicted class for each sample in `x` is
returned. For a regression model, the predicted value based on `x` is
returned.
Args:
x: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(x, axis=axis, batch_size=batch_size)
def predict_proba(self, x, batch_size=None):
"""Predict class probability of the input samples `x`.
Args:
x: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(x, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
if self._graph is None:
raise NotFittedError
return self._graph.get_tensor_by_name(name)
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if self._graph is None:
raise NotFittedError
# Copy model dir into new path.
_copy_dir(self.model_dir, path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
raise NotImplementedError
@classmethod
def restore(cls, path, config=None):
# pylint: disable=unused-argument
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
reconfigured.
Returns:
Estimator, object of the subclass of TensorFlowEstimator.
Raises:
ValueError: if `path` does not contain a model definition.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
_config = config # pylint: disable=unused-variable,invalid-name
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if isinstance(value, string_types) and not isinstance(value, str):
model_def[key] = str(value)
if key in reconfigurable_params:
new_value = locals()[key]
if new_value is not None:
model_def[key] = new_value
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
# pylint: disable=protected-access
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python.learn import estimators
# Estimator must be one of the defined estimators in the __init__ file.
result = getattr(estimators, class_name)(**model_def)
# pylint: disable=protected-access
result._restore(path)
return result
def _get_model_fn(self, model_fn):
"""Backward compatibility way of adding class weight and IS_TRAINING.
TODO(ipolosukhin): Remove this function after new layers are available.
Specifically:
* dropout and batch norm should work via update ops.
* class weights should be retrieved from weights column or hparams.
Args:
model_fn: Core model function.
Returns:
Model function.
"""
def _model_fn(features, targets, mode):
"""Model function."""
ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
if self.class_weight is not None:
constant_op.constant(self.class_weight, name='class_weight')
predictions, loss = model_fn(features, targets)
if isinstance(self.learning_rate, types.FunctionType):
learning_rate = self.learning_rate(contrib_framework.get_global_step())
else:
learning_rate = self.learning_rate
if isinstance(self.optimizer, types.FunctionType):
optimizer = self.optimizer(learning_rate)
else:
optimizer = self.optimizer
train_op = layers.optimize_loss(
loss,
contrib_framework.get_global_step(),
learning_rate=learning_rate,
optimizer=optimizer,
clip_gradients=self.clip_gradients)
return predictions, loss, train_op
return _model_fn
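  # A hedged sketch of the contract wrapped above (assuming `import tensorflow
  # as tf`; all other names are illustrative): the core model_fn maps
  # (features, targets) to (predictions, loss), and this wrapper supplies the
  # optimizer, learning-rate schedule and train_op via layers.optimize_loss.
  #
  #   def my_model_fn(features, targets):
  #       predictions = layers.fully_connected(features, 1, activation_fn=None)
  #       loss = tf.reduce_mean(tf.square(predictions - targets))
  #       return predictions, loss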
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
"""TensorFlow Base Transformer class."""
def transform(self, x):
"""Transform `x` using trained transformer."""
return(super(TensorFlowBaseTransformer, self).predict(
x, axis=1, batch_size=None))
def fit_transform(self, x, y=None, monitors=None, logdir=None):
"""Fit transformer and transform `x` using trained transformer."""
return self.fit(x, y, monitors=monitors, logdir=logdir).transform(x)
| apache-2.0 |
janezhango/BigDataMachineLearning | py/testdir_single_jvm/test_GLM2_hastie.py | 2 | 3613 |
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_hosts, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
y = 10
# Took n_folds out, because GLM doesn't include n_folds time and it's slow
# wanted to compare GLM time to my measured time
# hastie has two values 1,-1. need to specify case
kwargs = {'response': y, 'alpha': 0, 'family': 'binomial'}
# ToInt2.html?src_key=Twitter2DB.hex&column_index=2
# ToEnum2.html?src_key=Twitter2DB.hex&column_index=2
start = time.time()
# change the 1/-1 to enums
h2o.nodes[0].to_enum(src_key=parseResult['destination_key'], column_index=y+1)
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
validation = glm['glm_model']['submodels'][0]['validation']
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'auc', validation, self.validation1)
else:
self.validation1 = copy.deepcopy(validation)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM2_hastie(self):
h2o.beta_features = True
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=75)
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_doit(self,filename2x, None, pathname2x, timeoutSecs=75)
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2x,pathname2x,pathname4x)
print "Iterating 3 times on this last one for perf compare"
for i in range(3):
print "\nTrial #", i, "of", filename4x
glm_doit(self, filename4x, None, pathname4x, timeoutSecs=150)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
ellisk42/TikZ | noTraceBaseline.py | 1 | 14657 | from DSL import *
from graphicsSearch import serializeLine
from dispatch import dispatch
from time import time  # time() is called directly below
import random
import numpy as np
import cPickle as pickle
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.optim as optimization
import torch.cuda as cuda
from torch.nn.utils.rnn import pack_padded_sequence
GPU = cuda.is_available()
def variable(x, volatile=False):
if isinstance(x,list): x = np.array(x)
if isinstance(x,(np.ndarray,np.generic)): x = torch.from_numpy(x)
if GPU: x = x.cuda()
return Variable(x, volatile=volatile)
LEXICON = ["START","END",
"circle",
"rectangle",
"line","arrow = True","arrow = False","solid = True","solid = False",
"for",
"reflect","x","y",
"}",
"if",
"i","j","k","None"] + map(str,range(-5,20))
symbolToIndex = dict(zip(LEXICON,range(len(LEXICON))))
@dispatch(Loop)
def serializeProgram(l):
return serializeLine(l) + ([] if l.boundary == None else ["if"] + serializeProgram(l.boundary) + ["}"]) + \
serializeProgram(l.body) + ["}"]
@dispatch(Reflection)
def serializeProgram(l):
return serializeLine(l) + serializeProgram(l.body) + ["}"]
@dispatch(Primitive)
def serializeProgram(l): return serializeLine(l)
@dispatch(Block)
def serializeProgram(l):
return [ c for x in l.items for c in serializeProgram(x) ]
def parseOutput(l):
def get(l):
n = l[0]
del l[0]
return n
def parseLinear(l):
b = int(get(l))
x = get(l)
m = int(get(l))
if x == 'None': x = None
return LinearExpression(m,x,b)
def parseBody(l):
items = []
while True:
if l == []: return Block(items)
if l[0] == "}":
get(l)
return Block(items)
items.append(parseAtomic(l))
def parseAtomic(l):
k = get(l)
if k == 'circle':
x = parseLinear(l)
y = parseLinear(l)
return Primitive(k,x,y)
if k == 'rectangle':
x1 = parseLinear(l)
y1 = parseLinear(l)
x2 = parseLinear(l)
y2 = parseLinear(l)
return Primitive(k,x1,y1,x2,y2)
if k == 'line':
x1 = parseLinear(l)
y1 = parseLinear(l)
x2 = parseLinear(l)
y2 = parseLinear(l)
a = get(l)
s = get(l)
return Primitive(k,x1,y1,x2,y2,
"arrow = True" == a,
"solid = True" == s)
if k == 'for':
v = get(l)
b = parseLinear(l)
if l[0] == "if":
get(l)
boundary = parseBody(l)
else: boundary = None
body = parseBody(l)
return Loop(v = v, bound = b, boundary = boundary, body = body)
if k == 'reflect':
a = get(l)
c = int(get(l))
body = parseBody(l)
return Reflection(body = body, axis = a, coordinate = c)
raise Exception('parsing line '+k)
return parseBody(l)
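# For reference, parseOutput consumes a flat token list in the order emitted by
# serializeProgram. Assuming serializeLine writes each linear expression as the
# triple (intercept b, variable name or "None", coefficient m), i.e. the same
# order parseLinear reads them back, a circle centred at (1, 2) would
# round-trip through tokens like:
#   ['circle', '1', 'None', '0', '2', 'None', '0']
# that is, the shape keyword followed by one (b, x, m) triple per coordinate.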
class CaptionEncoder(nn.Module):
def __init__(self):
super(CaptionEncoder, self).__init__()
(squareFilters,rectangularFilters,numberOfFilters,kernelSizes,poolSizes,poolStrides) = (20,2,[10],
[9,9],
[8,4],
[4,4])
self.squareFilters = nn.Conv2d(1, squareFilters, kernelSizes[0], padding = kernelSizes[0]/2)
self.verticalFilters = nn.Conv2d(1, rectangularFilters,
(kernelSizes[0]/2 - 1,kernelSizes[0]*2 - 1),
padding = (kernelSizes[0]/4 - 1,kernelSizes[0] - 1))
self.horizontalFilters = nn.Conv2d(1, rectangularFilters,
(kernelSizes[0]*2 - 1,kernelSizes[0]/2 - 1),
padding = (kernelSizes[0] - 1,kernelSizes[0]/4 - 1))
self.laterStages = nn.Sequential(nn.ReLU(),
nn.MaxPool2d(poolSizes[0],poolStrides[0],padding = poolSizes[0]/2 - 1),
nn.Conv2d(squareFilters + 2*rectangularFilters,
numberOfFilters[0],
kernelSizes[1],
padding = kernelSizes[1]/2),
nn.ReLU(),
nn.MaxPool2d(poolSizes[1],poolStrides[1],padding = poolSizes[1]/2 - 1))
def forward(self,x):
c1 = self.squareFilters(x)
c2 = self.verticalFilters(x)
c3 = self.horizontalFilters(x)
c0 = torch.cat((c1,c2,c3),dim = 1)
output = self.laterStages(c0)
return output
#return output.view(output.size(0),-1)
class CaptionDecoder(nn.Module):
def __init__(self):
super(CaptionDecoder, self).__init__()
IMAGEFEATURESIZE = 2560
EMBEDDINGSIZE = 64
INPUTSIZE = IMAGEFEATURESIZE + EMBEDDINGSIZE
HIDDEN = 1024
LAYERS = 2
# self.embedding : list of N indices (BxW) -> (B,W,EMBEDDINGSIZE)
self.embedding = nn.Embedding(len(LEXICON),EMBEDDINGSIZE)
# The embedding is combined with the image features at each time step
self.rnn = nn.LSTM(INPUTSIZE, HIDDEN, LAYERS, batch_first = True)
self.tokenPrediction = nn.Linear(HIDDEN,len(LEXICON))
def forward(self, features, captions, lengths):
# flatten the convolution output
features = features.view(features.size(0),-1)
e = self.embedding(captions) # e: BxLx embeddingSize
#print "e = ",e.size()
#expandedFeatures: BxTx2560
expandedFeatures = features.unsqueeze(1).expand(features.size(0),e.size(1),features.size(1))
#recurrentInputs: bxtxINPUTSIZE
recurrentInputs = torch.cat((expandedFeatures,e),2)
#print "recurrentInputs = ",recurrentInputs.size()
packed = pack_padded_sequence(recurrentInputs, lengths, batch_first = True)
hidden,_ = self.rnn(packed)
outputs = self.tokenPrediction(hidden[0])
#print "outputs = ",outputs.size()
return outputs
def sample(self, features):
result = ["START"]
# (1,1,F)
features = features.view(-1).unsqueeze(0).unsqueeze(0)
#features: 1x1x2560
states = None
while True:
e = self.embedding(variable([symbolToIndex[result[-1]]]).view((1,-1)))
recurrentInput = torch.cat((features,e),2)
output, states = self.rnn(recurrentInput,states)
distribution = self.tokenPrediction(output).view(-1)
distribution = F.log_softmax(distribution).data.exp()
draw = torch.multinomial(distribution,1)[0]
c = LEXICON[draw]
if len(result) > 20 or c == "END":
return result[1:]
else:
result.append(c)
def buildCaptions(self,tokens):
'''returns inputs, sizes, targets'''
#tokens = [ [self.symbolToIndex["START"]] + [ self.symbolToIndex[s] for s in serializeProgram(p) ] + [self.symbolToIndex["END"]]
# for p in programs ]
# The full token sequences are START, ..., END
# Training input sequences are START, ...
# Target output sequences are ..., END
# the sizes are actually one smaller therefore
# Make sure that the token sequences are decreasing in size
previousLength = None
for t in tokens:
assert previousLength == None or len(t) <= previousLength
previousLength = len(t)
sizes = map(lambda t: len(t) - 1,tokens)
maximumSize = max(sizes)
tokens = [ np.concatenate((p, np.zeros(maximumSize + 1 - len(p),dtype = np.int)))
for p in tokens ]
tokens = np.array(tokens)
return variable(tokens[:,:-1]),sizes,variable(tokens[:,1:])
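        # A small worked example of the layout produced above: a caption
        # tokenized as [START, circle, ..., END] yields
        #   inputs  = [START, circle, ...]   (tokens[:, :-1])
        #   targets = [circle, ..., END]     (tokens[:, 1:])
        # with shorter captions zero-padded up to the longest one in the batch.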
class NoTrace(nn.Module):
def __init__(self):
super(NoTrace, self).__init__()
self.encoder = CaptionEncoder()
self.decoder = CaptionDecoder()
def sampleMany(self, sequence, duration):
image = variable(np.array([ sequence.draw() ], dtype = np.float32), volatile = True).unsqueeze(1)
startTime = time()
imageFeatures = self.encoder(image)
#imageFeatures: 1x10x16x16
programs = []
while time() < startTime + duration:
nextSequence = self.decoder.sample(imageFeatures)
try:
p = parseOutput(nextSequence)
print "Sampled",p
programs.append({"time": time() - startTime,
"program": p,
"spec": p.convertToSequence()})
except: continue
return programs
def loss(self,examples):
# IMPORTANT: Sort the examples by their size. recurrent network stuff needs this
examples.sort(key = lambda e: len(e.tokens), reverse = True)
x = variable(np.array([ e.sequence.draw() for e in examples], dtype = np.float32))
x = x.unsqueeze(1) # insert the channel
imageFeatures = self.encoder(x)
inputs, sizes, T = self.decoder.buildCaptions([ e.tokens for e in examples ])
outputDistributions = self.decoder(imageFeatures, inputs, sizes)
T = pack_padded_sequence(T, sizes, batch_first = True)[0]
return F.cross_entropy(outputDistributions, T)
def load(self,path):
if os.path.isfile(path):
if not GPU: stuff = torch.load(path,map_location = lambda s,l: s)
else: stuff = torch.load(path)
self.load_state_dict(stuff)
print "Loaded checkpoint",path
else:
print "Could not find checkpoint",path
def dump(self,path):
torch.save(self.state_dict(),path)
print "Dumped checkpoint",path
class TrainingExample():
def __init__(self,p):
try:
self.tokens = np.array([symbolToIndex["START"]] + [ symbolToIndex[s] for s in serializeProgram(p) ] + [symbolToIndex["END"]])
except KeyError:
print "Key error in tokenization",serializeProgram(p)
assert False
self.sequence = p.convertToSequence()
#self.program = p
if str(parseOutput(serializeProgram(p))) != str(p):
print "Serialization failure for program",p
print serializeProgram(p)
print parseOutput(serializeProgram(p))
assert False
def loadTrainingData(n):
print "About to load the examples"
alternatives = ['/scratch/ellisk/randomlyGeneratedPrograms.p',
'randomlyGeneratedPrograms.p']
for alternative in alternatives:
if os.path.exists(alternative):
trainingDataPath = alternative
print "Loading training data from",trainingDataPath
break
with open(trainingDataPath,'rb') as handle:
X = pickle.load(handle)
print "Keeping %d/%d examples"%(n,len(X))
pruned = []
for x in X:
x = pickle.loads(x)
if x.items != []:
pruned.append(TrainingExample(x))
if len(pruned) >= n:
break
print "Pruned down to %d examples"%(len(pruned))
return pruned
if __name__ == "__main__":
import sys
model = NoTrace()
if GPU:
print "Using the GPU"
model = model.float().cuda()
else:
print "Using the CPU"
model = model.float()
model.load("checkpoints/noTrace.torch")
if 'test' in sys.argv:
from groundTruthParses import *
import os
target = getGroundTruthParse('drawings/expert-%s.png'%(sys.argv[2]))
results = model.sampleMany(target, 60*60)
results.sort(key = lambda z: (z["spec"] - target, z["program"].totalCost(), z["time"]))
if len(results) > 0:
z = results[0]
print "Best program:"
print z["program"].pretty()
#showImage(np.concatenate((1 - target.draw(),s.draw()),axis = 1))
saveMatrixAsImage(255*np.concatenate((1 - target.draw(),z["spec"].draw()),axis = 0),
"noTraceOutputs/%s.png"%(sys.argv[2]))
with open("noTraceOutputs/%s.p"%(sys.argv[2]),'wb') as handle:
pickle.dump(z,handle)
        sys.exit(0)
if 'statistics' in sys.argv:
from groundTruthParses import *
import os
results = []
gt = []
for n in xrange(100):
gt = gt + [getGroundTruthParse('drawings/expert-%s.png'%(n))]
f = "noTraceOutputs/%d.p"%n
try:
with open(f,"rb") as handle:
result = pickle.load(handle)
results.append(result)
except:
results.append(None)
times = [ r["time"] if r else float('inf')
for r in results ]
medianTime = sorted(times)[len(times)/2]
print "Median time",medianTime
successful = sum(r is not None and (r["spec"] - g) == 0
for r,g in zip(results,gt) )
print "# times that we got a program which was consistent with the data",successful
        sys.exit(0)
#print "# Learnable parameters:",sum([ parameter.view(-1).shape[0] for parameter in model.parameters() ])
N = 1*(10**7)
B = 64
X = loadTrainingData(N)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
E = 0
while True:
E += 1
print "epic",E
# scrambled the data
X = list(np.random.permutation(X))
start = 0
batchesPerLoop = N/B
batchIndex = 0
while start < N:
batch = X[start:start + B]
model.zero_grad()
L = model.loss(batch)
if batchIndex%50 == 0:
print "Batch [%d/%d], LOSS = %s"%(batchIndex,batchesPerLoop,L.data[0])
model.dump("checkpoints/noTrace.torch")
L.backward()
optimizer.step()
start += B
batchIndex += 1
| gpl-3.0 |
narahari92/spark | python/pyspark/ml/clustering.py | 15 | 41736 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
pass
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None)
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction allows for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an el
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
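    A hedged preprocessing sketch (column names are illustrative, not required by the API):
        tokenized = Tokenizer(inputCol="text", outputCol="words").transform(raw_df)
        cv = CountVectorizer(inputCol="words", outputCol="features").fit(tokenized)
        counts = cv.transform(tokenized)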
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
if __name__ == "__main__":
import doctest
import pyspark.ml.clustering
from pyspark.sql import SparkSession
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| apache-2.0 |
espg/scikit-learn | examples/linear_model/plot_ols.py | 13 | 2048 | # -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
The example below uses only the first feature of the `diabetes` dataset,
in order to illustrate the data points within the two-dimensional plot.
The straight line can be seen in the plot, showing how linear regression
attempts to draw a straight line that will best minimize the
residual sum of squares between the observed responses in the dataset,
and the responses predicted by the linear approximation.
The coefficients, residual sum of squares and the coefficient of
determination are also calculated.
"""
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
# Use only one feature
diabetes_X = diabetes_X[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes_y[:-20]
diabetes_y_test = diabetes_y[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print("Coefficients: \n", regr.coef_)
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(diabetes_y_test, diabetes_y_pred))
# The coefficient of determination: 1 is perfect prediction
print("Coefficient of determination: %.2f" % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color="black")
plt.plot(diabetes_X_test, diabetes_y_pred, color="blue", linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
espg/scikit-learn | examples/linear_model/plot_logistic_path.py | 12 | 2159 | """
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
# %%
# Load data
# ---------
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# %%
# Compute regularization path
# ---------------------------
import numpy as np
from sklearn import linear_model
from sklearn.svm import l1_min_c
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
coefs_ = np.array(coefs_)
# %%
# Plot regularization path
# ------------------------
import matplotlib.pyplot as plt
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
| bsd-3-clause |
espg/scikit-learn | sklearn/svm/tests/test_svm.py | 8 | 47550 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import warnings
import re
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.svm import LinearSVC, OneClassSVM, SVR, NuSVR, LinearSVR
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.multiclass import OneVsRestClassifier
# mypy error: Module 'sklearn.svm' has no attribute '_libsvm'
from sklearn.svm import _libsvm # type: ignore
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel="linear").fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.0])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ("linear", "rbf"):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert np.mean(clf.predict(iris.data) == iris.target) > 0.9
assert hasattr(clf, "coef_") == (k == "linear")
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
# We unpack the values to create a dictionary with some of the return values
# from Libsvm's fit.
(
libsvm_support,
libsvm_support_vectors,
libsvm_n_class_SV,
libsvm_sv_coef,
libsvm_intercept,
libsvm_probA,
libsvm_probB,
# libsvm_fit_status and libsvm_n_iter won't be used below.
libsvm_fit_status,
libsvm_n_iter,
) = _libsvm.fit(iris.data, iris.target.astype(np.float64))
model_params = {
"support": libsvm_support,
"SV": libsvm_support_vectors,
"nSV": libsvm_n_class_SV,
"sv_coef": libsvm_sv_coef,
"intercept": libsvm_intercept,
"probA": libsvm_probA,
"probB": libsvm_probB,
}
pred = _libsvm.predict(iris.data, **model_params)
assert np.mean(pred == iris.target) > 0.95
# We unpack the values to create a dictionary with some of the return values
# from Libsvm's fit.
(
libsvm_support,
libsvm_support_vectors,
libsvm_n_class_SV,
libsvm_sv_coef,
libsvm_intercept,
libsvm_probA,
libsvm_probB,
# libsvm_fit_status and libsvm_n_iter won't be used below.
libsvm_fit_status,
libsvm_n_iter,
) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear")
model_params = {
"support": libsvm_support,
"SV": libsvm_support_vectors,
"nSV": libsvm_n_class_SV,
"sv_coef": libsvm_sv_coef,
"intercept": libsvm_intercept,
"probA": libsvm_probA,
"probB": libsvm_probB,
}
pred = _libsvm.predict(iris.data, **model_params, kernel="linear")
assert np.mean(pred == iris.target) > 0.95
pred = _libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
assert np.mean(pred == iris.target) > 0.95
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = _libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel="precomputed")
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
with pytest.raises(ValueError):
clf.predict(KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
def kfunc(x, y):
return np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(np.array(X), Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel="precomputed")
clf2 = svm.SVC(kernel="linear")
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (
svm.NuSVR(kernel="linear", nu=0.4, C=1.0),
svm.NuSVR(kernel="linear", nu=0.4, C=10.0),
svm.SVR(kernel="linear", C=10.0),
svm.LinearSVR(C=10.0),
svm.LinearSVR(C=10.0),
):
clf.fit(diabetes.data, diabetes.target)
assert clf.score(diabetes.data, diabetes.target) > 0.02
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=unit_weight
)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target
)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(
np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001
)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=random_weight
)
score3 = lsvr_unflat.score(
diabetes.data, diabetes.target, sample_weight=random_weight
)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
with pytest.raises(ValueError):
clf.predict(X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [1, -1, -1])
assert pred.dtype == np.dtype("intp")
assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3)
assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3)
with pytest.raises(AttributeError):
(lambda: clf.coef_)()
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert np.mean(y_pred_test == 1) > 0.9
y_pred_outliers = clf.predict(X_outliers)
assert np.mean(y_pred_outliers == -1) > 0.9
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_oneclass_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf = svm.OneClassSVM(gamma=1).fit(X_train)
assert_array_equal(
clf.score_samples([[2.0, 2.0]]),
clf.decision_function([[2.0, 2.0]]) + clf.offset_,
)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel="linear", C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.predict([[-0.1, -0.1]]), [1])
clf._dual_coef_ = np.array([[0.0, 1.0]])
assert_array_equal(clf.predict([[-0.1, -0.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (
svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0),
):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9
assert_almost_equal(
clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8
)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit(
iris.data, iris.target
)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)]
)
expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo")
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
@pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC))
def test_decision_function_shape(SVM):
# check that decision_function_shape='ovr' or 'ovo' gives
# correct shape and is consistent with predict
clf = SVM(kernel="linear", decision_function_shape="ovr").fit(
iris.data, iris.target
)
dec = clf.decision_function(iris.data)
assert dec.shape == (len(iris.data), 3)
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = SVM(kernel="linear", decision_function_shape="ovr").fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert dec.shape == (len(X_test), 5)
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape of the decision function when decision_function_shape='ovo'
clf = SVM(kernel="linear", decision_function_shape="ovo").fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert dec.shape == (len(X_train), 10)
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel="linear", C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(
n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2
)
for clf in (
linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC(),
):
clf.set_params(class_weight={0: 0.1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert f1_score(y_[100:], y_pred) > 0.3
@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()])
def test_svm_classifier_sided_sample_weight(estimator):
# fit a linear SVM and check that giving more weight to opposed samples
# in the space will flip the decision toward these samples.
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel="linear")
# check that with unit weights, a sample is supposed to be predicted on
# the boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred == pytest.approx(0)
# give more weights to opposed samples
sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred < 0
sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred > 0
@pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)])
def test_svm_regressor_sided_sample_weight(estimator):
# similar test to test_svm_classifier_sided_sample_weight but for
# SVM regressors
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel="linear")
# check that with unit weights, a sample is supposed to be predicted on
# the boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred == pytest.approx(1.5)
# give more weights to opposed samples
sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred < 1.5
sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred > 1.5
def test_svm_equivalence_sample_weight_C():
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_allclose(dual_coef_no_weight, clf.dual_coef_)
@pytest.mark.parametrize(
"Estimator, err_msg",
[
(svm.SVC, "Invalid input - all samples have zero or negative weights."),
(svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"),
(svm.SVR, "Invalid input - all samples have zero or negative weights."),
(svm.NuSVR, "Invalid input - all samples have zero or negative weights."),
(svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."),
],
ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"],
)
@pytest.mark.parametrize(
"sample_weight",
[[0] * len(Y), [-0.3] * len(Y)],
ids=["weights-are-zero", "weights-are-negative"],
)
def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight):
est = Estimator(kernel="linear")
with pytest.raises(ValueError, match=err_msg):
est.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, err_msg",
[
(
svm.SVC,
"Invalid input - all samples with positive weights have the same label",
),
(svm.NuSVC, "specified nu is infeasible"),
],
ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
"sample_weight",
[[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]],
ids=["mask-label-1", "mask-label-2"],
)
def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight):
clf = Classifier(kernel="linear")
with pytest.raises(ValueError, match=err_msg):
clf.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, model",
[
(svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}),
(svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}),
],
ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
"sample_weight, mask_side",
[([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")],
ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weights_svc_leave_two_labels(
Classifier, model, sample_weight, mask_side
):
clf = Classifier(kernel="linear")
clf.fit(X, Y, sample_weight=sample_weight)
assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)
@pytest.mark.parametrize(
"Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"]
)
@pytest.mark.parametrize(
"sample_weight",
[[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weight_equal_coeffs(Estimator, sample_weight):
# model generates equal coefficients
est = Estimator(kernel="linear")
est.fit(X, Y, sample_weight=sample_weight)
coef = np.abs(est.coef_).ravel()
assert coef[0] == pytest.approx(coef[1], rel=1e-3)
@ignore_warnings(category=UndefinedMetricWarning)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from one
    # class.
# We add one to the targets as a non-regression test:
# class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced])
assert np.argmax(class_weights) == 2
for clf in (
svm.SVC(kernel="linear"),
svm.LinearSVC(random_state=0),
LogisticRegression(),
):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight="balanced")
y_pred_balanced = clf.fit(
X[unbalanced],
y[unbalanced],
).predict(X)
assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score(
y, y_pred_balanced, average="macro"
)
def test_bad_input():
# Test dimensions for labels
Y2 = Y[:-1] # wrong dimensions for labels
with pytest.raises(ValueError):
svm.SVC().fit(X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert not Xf.flags["C_CONTIGUOUS"]
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert not yf.flags["F_CONTIGUOUS"]
assert not yf.flags["C_CONTIGUOUS"]
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel="precomputed")
with pytest.raises(ValueError):
clf.fit(X, Y)
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
with pytest.raises(ValueError):
clf.predict(sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
with pytest.raises(ValueError):
clf.predict(X)
clf = svm.SVC()
clf.fit(X, Y)
with pytest.raises(ValueError):
clf.predict(Xt)
def test_svc_nonfinite_params():
# Check SVC throws ValueError when dealing with non-finite parameter values
rng = np.random.RandomState(0)
n_samples = 10
fmax = np.finfo(np.float64).max
X = fmax * rng.uniform(size=(n_samples, 2))
y = rng.randint(0, 2, size=n_samples)
clf = svm.SVC()
msg = "The dual coefficients or intercepts are not finite"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError
clf = svm.SVC(kernel="linear", probability=True)
clf.fit(X, Y)
clf.predict_proba(T)
_libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
def test_sparse_precomputed():
clf = svm.SVC(kernel="precomputed")
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
with pytest.raises(TypeError, match="Sparse precomputed"):
clf.fit(sparse_gram, [0, 1])
def test_sparse_fit_support_vectors_empty():
# Regression test for #14893
X_train = sparse.csr_matrix(
[[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]]
)
y_train = np.array([0.04, 0.04, 0.10, 0.16])
model = svm.SVR(kernel="linear")
model.fit(X_train, y_train)
assert not model.support_vectors_.data.size
assert not model.dual_coef_.data.size
@pytest.mark.parametrize("loss", ["hinge", "squared_hinge"])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
@pytest.mark.parametrize("dual", [True, False])
def test_linearsvc_parameters(loss, penalty, dual):
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual, random_state=0)
if (
(loss, penalty) == ("hinge", "l1")
or (loss, penalty, dual) == ("hinge", "l2", False)
or (penalty, dual) == ("l1", True)
):
with pytest.raises(
ValueError,
match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s"
% (penalty, loss, dual),
):
clf.fit(X, y)
else:
clf.fit(X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert clf.fit_intercept
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(
penalty="l1", loss="squared_hinge", dual=False, random_state=0
).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty="l2", dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty="l2", loss="hinge", dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class="crammer_singer", random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9
# classifiers shouldn't be the same
assert (ovr_clf.coef_ != cs_clf.coef_).all()
# test decision function
assert_array_equal(
cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1),
)
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
X, Y, sample_weight=unit_weight
)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
X, Y, sample_weight=random_weight
)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0, tol=1e-12, max_iter=1000).fit(
X_flat, y_flat
)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = (
svm.LinearSVC(
fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0,
)
.fit(X, y)
.score(X, y)
)
assert acc > 0.9
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert set(clf.classes_) == set(iris.target_names)
assert np.mean(clf.predict(iris.data) == target) > 0.8
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1], [3, 1], [1, 3], [2, 3]]
y = [0, 0, 1, 1]
clf = classifier(
fit_intercept=True,
penalty="l1",
loss="squared_hinge",
dual=False,
C=4,
tol=1e-7,
random_state=0,
)
assert clf.intercept_scaling == 1, clf.intercept_scaling
assert clf.fit_intercept
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert intercept1 < -1
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1], [3, 1], [1, 3], [2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel="linear").fit(iris.data, iris.target),
svm.NuSVC(kernel="linear").fit(iris.data, iris.target),
svm.SVR(kernel="linear").fit(iris.data, iris.target),
svm.NuSVR(kernel="linear").fit(iris.data, iris.target),
svm.OneClassSVM(kernel="linear").fit(iris.data),
]
for clf in svms:
with pytest.raises(AttributeError):
clf.__setattr__("coef_", np.arange(3))
with pytest.raises((RuntimeError, ValueError)):
clf.coef_.__setitem__((0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(
kernel=lambda x, y: np.dot(x, y.T),
probability=True,
random_state=0,
decision_function_shape="ovr",
)
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(
kernel="linear", probability=True, random_state=0, decision_function_shape="ovr"
)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data))
assert_array_almost_equal(
svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4,
)
assert_array_almost_equal(
svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data),
)
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
with pytest.raises(ValueError):
svc.fit(X, Y)
def test_libsvm_convergence_warnings():
a = svm.SVC(
kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=2
)
warning_msg = (
r"Solver terminated early \(max_iter=2\). Consider pre-processing "
r"your data with StandardScaler or MinMaxScaler."
)
with pytest.warns(ConvergenceWarning, match=warning_msg):
a.fit(np.array(X), Y)
assert np.all(a.n_iter_ == 2)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
with pytest.raises(Exception, match=r".*\bSVC\b.*\bnot\b.*\bfitted\b"):
clf.predict(X)
clf = svm.NuSVR()
with pytest.raises(Exception, match=r".*\bNuSVR\b.*\bnot\b.*\bfitted\b"):
clf.predict(X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svm_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(random_state=0, max_iter=2)
warning_msg = "Liblinear failed to converge, increase the number of iterations."
with pytest.warns(ConvergenceWarning, match=warning_msg):
lsvc.fit(X, Y)
# Check that we have an n_iter_ attribute with int type as opposed to a
# numpy array or an np.int32 so as to match the docstring.
assert isinstance(lsvc.n_iter_, int)
assert lsvc.n_iter_ == 2
lsvr = svm.LinearSVR(random_state=0, max_iter=2)
with pytest.warns(ConvergenceWarning, match=warning_msg):
lsvr.fit(iris.data, iris.target)
assert isinstance(lsvr.n_iter_, int)
assert lsvr.n_iter_ == 2
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel="linear"), svm.NuSVR(kernel="linear"), svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(
svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_
)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert lsvc.intercept_ == 0.0
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert hasattr(G, "predict_proba")
G.fit(iris.data, iris.target)
assert hasattr(G, "predict_proba")
G = svm.SVC(probability=False)
assert not hasattr(G, "predict_proba")
G.fit(iris.data, iris.target)
assert not hasattr(G, "predict_proba")
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert hasattr(G, "predict_proba")
msg = "predict_proba is not available when fitted with probability=False"
with pytest.raises(NotFittedError, match=msg):
G.predict_proba(iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(decision_function_shape="ovr")).fit(
X, y
)
assert len(clf.predict(X)) == len(y)
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack(
(
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1], # Q4
)
)
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel="linear", decision_function_shape="ovr")
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert np.min(pred_class_deci_val) > 0.0
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1])
@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_invalid_break_ties_param(SVCClass):
X, y = make_blobs(random_state=42)
svm = SVCClass(
kernel="linear", decision_function_shape="ovo", break_ties=True, random_state=42
).fit(X, y)
with pytest.raises(ValueError, match="break_ties must be False"):
svm.predict(y)
@pytest.mark.parametrize("SVCClass", [svm.SVC, svm.NuSVC])
def test_svc_ovr_tie_breaking(SVCClass):
"""Test if predict breaks ties in OVR mode.
Related issue: https://github.com/scikit-learn/scikit-learn/issues/8277
"""
X, y = make_blobs(random_state=0, n_samples=20, n_features=2)
xs = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
ys = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
xx, yy = np.meshgrid(xs, ys)
common_params = dict(
kernel="rbf", gamma=1e6, random_state=42, decision_function_shape="ovr"
)
svm = SVCClass(
break_ties=False,
**common_params,
).fit(X, y)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
assert not np.all(pred == np.argmax(dv, axis=1))
svm = SVCClass(
break_ties=True,
**common_params,
).fit(X, y)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
dv = svm.decision_function(np.c_[xx.ravel(), yy.ravel()])
assert np.all(pred == np.argmax(dv, axis=1))
def test_gamma_scale():
X, y = [[0.0], [1.0]], [0, 1]
clf = svm.SVC()
clf.fit(X, y)
assert_almost_equal(clf._gamma, 4)
@pytest.mark.parametrize(
"SVM, params",
[
(LinearSVC, {"penalty": "l1", "loss": "squared_hinge", "dual": False}),
(LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": True}),
(LinearSVC, {"penalty": "l2", "loss": "squared_hinge", "dual": False}),
(LinearSVC, {"penalty": "l2", "loss": "hinge", "dual": True}),
(LinearSVR, {"loss": "epsilon_insensitive", "dual": True}),
(LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}),
(LinearSVR, {"loss": "squared_epsilon_insensitive", "dual": True}),
],
)
def test_linearsvm_liblinear_sample_weight(SVM, params):
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.dtype("float"),
)
y = np.array(
[1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype("int")
)
X2 = np.vstack([X, X])
y2 = np.hstack([y, 3 - y])
sample_weight = np.ones(shape=len(y) * 2)
sample_weight[len(y) :] = 0
X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0)
base_estimator = SVM(random_state=42)
base_estimator.set_params(**params)
base_estimator.set_params(tol=1e-12, max_iter=1000)
est_no_weight = base.clone(base_estimator).fit(X, y)
est_with_weight = base.clone(base_estimator).fit(
X2, y2, sample_weight=sample_weight
)
for method in ("predict", "decision_function"):
if hasattr(base_estimator, method):
X_est_no_weight = getattr(est_no_weight, method)(X)
X_est_with_weight = getattr(est_with_weight, method)(X)
assert_allclose(X_est_no_weight, X_est_with_weight)
@pytest.mark.parametrize("Klass", (OneClassSVM, SVR, NuSVR))
def test_n_support(Klass):
    # Make sure n_support_ is correct for oneclass and SVR (used to be
# non-initialized)
# this is a non regression test for issue #14774
X = np.array([[0], [0.44], [0.45], [0.46], [1]])
y = np.arange(X.shape[0])
est = Klass()
assert not hasattr(est, "n_support_")
est.fit(X, y)
assert est.n_support_[0] == est.support_vectors_.shape[0]
assert est.n_support_.size == 1
@pytest.mark.parametrize("Estimator", [svm.SVC, svm.SVR])
def test_custom_kernel_not_array_input(Estimator):
"""Test using a custom kernel that is not fed with array-like for floats"""
data = ["A A", "A", "B", "B B", "A B"]
X = np.array([[2, 0], [1, 0], [0, 1], [0, 2], [1, 1]]) # count encoding
y = np.array([1, 1, 2, 2, 1])
def string_kernel(X1, X2):
assert isinstance(X1[0], str)
n_samples1 = _num_samples(X1)
n_samples2 = _num_samples(X2)
K = np.zeros((n_samples1, n_samples2))
for ii in range(n_samples1):
for jj in range(ii, n_samples2):
K[ii, jj] = X1[ii].count("A") * X2[jj].count("A")
K[ii, jj] += X1[ii].count("B") * X2[jj].count("B")
K[jj, ii] = K[ii, jj]
return K
K = string_kernel(data, data)
assert_array_equal(np.dot(X, X.T), K)
svc1 = Estimator(kernel=string_kernel).fit(data, y)
svc2 = Estimator(kernel="linear").fit(X, y)
svc3 = Estimator(kernel="precomputed").fit(K, y)
assert svc1.score(data, y) == svc3.score(K, y)
assert svc1.score(data, y) == svc2.score(X, y)
if hasattr(svc1, "decision_function"): # classifier
assert_allclose(svc1.decision_function(data), svc2.decision_function(X))
assert_allclose(svc1.decision_function(data), svc3.decision_function(K))
assert_array_equal(svc1.predict(data), svc2.predict(X))
assert_array_equal(svc1.predict(data), svc3.predict(K))
else: # regressor
assert_allclose(svc1.predict(data), svc2.predict(X))
assert_allclose(svc1.predict(data), svc3.predict(K))
def test_svc_raises_error_internal_representation():
"""Check that SVC raises error when internal representation is altered.
Non-regression test for #18891 and https://nvd.nist.gov/vuln/detail/CVE-2020-28975
"""
clf = svm.SVC(kernel="linear").fit(X, Y)
clf._n_support[0] = 1000000
msg = "The internal representation of SVC was altered"
with pytest.raises(ValueError, match=msg):
clf.predict(X)
@pytest.mark.parametrize(
"estimator, expected_n_iter_type",
[
(svm.SVC, np.ndarray),
(svm.NuSVC, np.ndarray),
(svm.SVR, int),
(svm.NuSVR, int),
(svm.OneClassSVM, int),
],
)
@pytest.mark.parametrize(
"dataset",
[
make_classification(n_classes=2, n_informative=2, random_state=0),
make_classification(n_classes=3, n_informative=3, random_state=0),
make_classification(n_classes=4, n_informative=4, random_state=0),
],
)
def test_n_iter_libsvm(estimator, expected_n_iter_type, dataset):
# Check that the type of n_iter_ is correct for the classes that inherit
# from BaseSVC.
    # Note that for SVC and NuSVC this is an ndarray, while for SVR, NuSVR, and
    # OneClassSVM it is an int.
# For SVC and NuSVC also check the shape of n_iter_.
X, y = dataset
n_iter = estimator(kernel="linear").fit(X, y).n_iter_
assert type(n_iter) == expected_n_iter_type
if estimator in [svm.SVC, svm.NuSVC]:
n_classes = len(np.unique(y))
assert n_iter.shape == (n_classes * (n_classes - 1) // 2,)
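# A short, hedged sketch (not part of the original test module) of why the
# expected shape above is ``(n_classes * (n_classes - 1) // 2,)``: libsvm
# decomposes a multiclass problem into one binary sub-problem per one-vs-one
# pair of classes, and SVC/NuSVC report one iteration count per pair. The
# helper name below is illustrative only.
def _n_one_vs_one_problems(n_classes):
    """Number of pairwise (one-vs-one) binary problems for `n_classes`."""
    return n_classes * (n_classes - 1) // 2
# e.g. _n_one_vs_one_problems(4) == 6, matching an n_iter_ of shape (6,).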
# TODO(1.4): Remove
@pytest.mark.parametrize("Klass", [SVR, NuSVR, OneClassSVM])
def test_svm_class_weights_deprecation(Klass):
clf = Klass()
with warnings.catch_warnings():
warnings.simplefilter("error", FutureWarning)
clf.fit(X, Y)
msg = (
"Attribute `class_weight_` was deprecated in version 1.2 and will be removed"
" in 1.4"
)
with pytest.warns(FutureWarning, match=re.escape(msg)):
getattr(clf, "class_weight_")
| bsd-3-clause |
theflofly/tensorflow | tensorflow/contrib/learn/python/learn/datasets/base.py | 39 | 8304 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from os import path
import random
import time
import numpy as np
from six.moves import urllib
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
@deprecated(None, 'Use tf.data instead.')
def load_csv_with_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file with a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
data[i] = np.asarray(row, dtype=features_dtype)
return Dataset(data=data, target=target)
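# A hedged usage sketch (not part of the original module): it shows the CSV
# layout `load_csv_with_header` expects -- a header row of the form
# ``n_samples,n_features,...`` followed by feature rows with the target in
# `target_column`. The helper name and temporary path below are illustrative.
def _example_load_csv_with_header(tmp_path='/tmp/example_with_header.csv'):
  with open(tmp_path, 'w') as f:
    f.write('3,2,class_a,class_b\n')  # 3 samples, 2 features
    f.write('1.0,2.0,0\n')            # two feature values, then the target
    f.write('3.0,4.0,1\n')
    f.write('5.0,6.0,0\n')
  return load_csv_with_header(
      tmp_path, target_dtype=np.int32, features_dtype=np.float32)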
@deprecated(None, 'Use tf.data instead.')
def load_csv_without_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file without a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
data, target = [], []
for row in data_file:
target.append(row.pop(target_column))
data.append(np.asarray(row, dtype=features_dtype))
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=data, target=target)
@deprecated(None, 'Use tf.data instead.')
def shrink_csv(filename, ratio):
"""Create a smaller dataset of only 1/ratio of original data."""
filename_small = filename.replace('.', '_small.')
with gfile.Open(filename_small, 'w') as csv_file_small:
writer = csv.writer(csv_file_small)
with gfile.Open(filename) as csv_file:
reader = csv.reader(csv_file)
i = 0
for row in reader:
if i % ratio == 0:
writer.writerow(row)
i += 1
@deprecated(None, 'Use scikits.learn.datasets.')
def load_iris(data_path=None):
"""Load Iris dataset.
Args:
data_path: string, path to iris dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'iris.csv')
return load_csv_with_header(
data_path, target_dtype=np.int, features_dtype=np.float)
@deprecated(None, 'Use scikits.learn.datasets.')
def load_boston(data_path=None):
"""Load Boston housing dataset.
Args:
data_path: string, path to boston dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'boston_house_prices.csv')
return load_csv_with_header(
data_path, target_dtype=np.float, features_dtype=np.float)
@deprecated(None, 'Use the retry module or similar alternatives.')
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
      max_delay * (1 + jitter)).
factor: each subsequent retry, the delay is multiplied by this value.
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
Returns:
A function that wraps another function to automatically retry it.
"""
return _internal_retry(
initial_delay=initial_delay,
max_delay=max_delay,
factor=factor,
jitter=jitter,
is_retriable=is_retriable)
def _internal_retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions, for internal use only.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
      max_delay * (1 + jitter)).
factor: each subsequent retry, the delay is multiplied by this value.
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
Returns:
A function that wraps another function to automatically retry it.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
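# A hedged usage sketch (not part of the original module): the public `retry`
# factory is applied as a decorator, much like `_internal_retry` is used for
# `urlretrieve_with_retry` below. The function and predicate here are
# illustrative only; any IOError raised by the body would trigger retries with
# exponentially growing, jittered delays from about 1 s up to about 16 s.
@retry(initial_delay=1.0, max_delay=16.0,
       is_retriable=lambda e: isinstance(e, IOError))
def _flaky_read(path):
  with open(path, 'rb') as f:
    return f.read()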
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@deprecated(None, 'Please use urllib or similar directly.')
@_internal_retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
@deprecated(None, 'Please write your own downloading logic.')
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
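# A hedged usage sketch (not part of the original module); the directory and
# URL below are placeholders rather than real resources. `maybe_download`
# only fetches the file when it is not already present in `work_directory`.
def _example_maybe_download():
  return maybe_download(filename='train.csv',
                        work_directory='/tmp/my_dataset',
                        source_url='http://example.com/data/train.csv')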
| apache-2.0 |
navigator8972/vae_dyn | encode_test.py | 1 | 3216 | '''
A script to test the completion of image sequences
'''
import tensorflow as tf
import sys
import os
import cPickle
from model_vaedyn import VAEDYN
import numpy as np
from train_vaedyn import next_batch
import dataset
import utils
sample_mode = True
if len(sys.argv) == 1:
#if no model folder is specified
with open(os.path.join('save-vaedyn', 'config.pkl')) as f:
saved_args = cPickle.load(f)
else:
#use the specified model
with open(os.path.join(sys.argv[1], 'config.pkl')) as f:
saved_args = cPickle.load(f)
model = VAEDYN(saved_args, sample_mode)
saver = tf.train.Saver(tf.global_variables())
# saver = tf.train.Saver()
sess = tf.InteractiveSession()
if len(sys.argv) == 1:
#if no model folder is specified
ckpt = tf.train.get_checkpoint_state('save-vaedyn')
model_checkpoint_path = ckpt.model_checkpoint_path
elif len(sys.argv) == 2:
ckpt = tf.train.get_checkpoint_state(sys.argv[1])
model_checkpoint_path = ckpt.model_checkpoint_path
elif len(sys.argv) == 3:
model_checkpoint_path = os.path.join(sys.argv[1], sys.argv[2])
print "loading model: ",model_checkpoint_path
saver.restore(sess, model_checkpoint_path)
#prepare seed to have stable shuffle
np.random.seed(1234)
tf.set_random_seed(1234)
#prepare dataset
print 'Constructing dataset...'
image_seq_data = utils.extract_image_sequences(fname='bin/extracted_data_image_seq.pkl', only_digits=False)
print image_seq_data.shape
image_seq_dataset = dataset.construct_datasets(image_seq_data)
#encode a sequence of images
tol_len = 20
seeding_len = 14
seq_samples = image_seq_dataset.test._data
compl_samples = seq_samples.copy()
for idx, seq_sample in enumerate(seq_samples):
print 'Processing test sample {0}...'.format(idx)
seq_sample_formatted = np.array([seq_sample])
prior_mus, prior_sigmas, enc_mus, enc_sigmas, final_state_c, final_state_h = model.encode(sess,saved_args, seq=seq_sample_formatted[0, :seeding_len, :])
#synthesis conditioned on the encoding
sample_data,mus = model.sample(sess,saved_args, num=tol_len-seeding_len, start=[seq_sample_formatted[0, seeding_len-1, :], (final_state_c, final_state_h)])
# compl_data = np.concatenate([seq_sample_formatted[:, :seeding_len, :], sample_data], axis=1)
compl_samples[idx, seeding_len:tol_len, :] = sample_data
compl_diff = compl_samples[:, seeding_len:tol_len, :] - seq_samples[:, seeding_len:tol_len, :]
#errors
square_errors = np.sum(np.sum((compl_diff)**2, axis=2), axis=1) / float((tol_len-seeding_len))
crossentropy_diff = seq_samples[:, seeding_len:tol_len, :] * np.log(1e-5 + compl_samples[:, seeding_len:tol_len, :]) \
+ (1-seq_samples[:, seeding_len:tol_len, :]) * np.log(1e-5 + 1 - compl_samples[:, seeding_len:tol_len, :] )
crossentropy_errors = np.sum(np.sum(-crossentropy_diff, axis=2), axis=1) / float((tol_len-seeding_len))
mean_square_errors = np.mean(square_errors)
std_square_errors = np.std(square_errors)
mean_ce_errors = np.mean(crossentropy_errors)
std_ce_errors = np.std(crossentropy_errors)
print 'Square Error Mean: ', mean_square_errors, '; Std: ', std_square_errors
print 'Cross Entropy Error Mean: ', mean_ce_errors, '; Std: ', std_ce_errors
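#a hedged sketch (not part of the original script): the two completion metrics
#computed above, factored into a reusable helper. Frames are assumed to be
#arrays of shape (n_samples, n_frames, n_pixels) with values in [0, 1].
def completion_errors(true_frames, pred_frames, eps=1e-5):
    diff = pred_frames - true_frames
    n_frames = float(true_frames.shape[1])
    mse = np.sum(np.sum(diff**2, axis=2), axis=1) / n_frames
    ce = true_frames * np.log(eps + pred_frames) \
         + (1 - true_frames) * np.log(eps + 1 - pred_frames)
    bce = np.sum(np.sum(-ce, axis=2), axis=1) / n_frames
    return mse, bce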
| bsd-3-clause |
espg/scikit-learn | sklearn/linear_model/_quantile.py | 9 | 11170 | # Authors: David Dale <dale.david@mail.ru>
# Christian Lorentzen <lorentzen.ch@gmail.com>
# License: BSD 3 clause
import warnings
from numbers import Real
import numpy as np
from scipy import sparse
from scipy.optimize import linprog
from ..base import BaseEstimator, RegressorMixin
from ._base import LinearModel
from ..exceptions import ConvergenceWarning
from ..utils import _safe_indexing
from ..utils.validation import _check_sample_weight
from ..utils.fixes import sp_version, parse_version
from ..utils._param_validation import Hidden, Interval, StrOptions
class QuantileRegressor(LinearModel, RegressorMixin, BaseEstimator):
"""Linear regression model that predicts conditional quantiles.
The linear :class:`QuantileRegressor` optimizes the pinball loss for a
desired `quantile` and is robust to outliers.
This model uses an L1 regularization like
:class:`~sklearn.linear_model.Lasso`.
Read more in the :ref:`User Guide <quantile_regression>`.
.. versionadded:: 1.0
Parameters
----------
quantile : float, default=0.5
The quantile that the model tries to predict. It must be strictly
between 0 and 1. If 0.5 (default), the model predicts the 50%
quantile, i.e. the median.
alpha : float, default=1.0
Regularization constant that multiplies the L1 penalty term.
fit_intercept : bool, default=True
Whether or not to fit the intercept.
solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
'revised simplex'}, default='interior-point'
Method used by :func:`scipy.optimize.linprog` to solve the linear
programming formulation.
From `scipy>=1.6.0`, it is recommended to use the highs methods because
they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
support sparse input data and, in fact, always convert to sparse csc.
From `scipy>=1.11.0`, "interior-point" is not available anymore.
.. versionchanged:: 1.4
The default of `solver` will change to `"highs"` in version 1.4.
solver_options : dict, default=None
Additional parameters passed to :func:`scipy.optimize.linprog` as
options. If `None` and if `solver='interior-point'`, then
`{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
sake of stability.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the features.
intercept_ : float
The intercept of the model, aka bias term.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The actual number of iterations performed by the solver.
See Also
--------
Lasso : The Lasso is a linear model that estimates sparse coefficients
with l1 regularization.
HuberRegressor : Linear regression model that is robust to outliers.
Examples
--------
>>> from sklearn.linear_model import QuantileRegressor
>>> import numpy as np
>>> n_samples, n_features = 10, 2
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> # the two following lines are optional in practice
>>> from sklearn.utils.fixes import sp_version, parse_version
>>> solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point"
>>> reg = QuantileRegressor(quantile=0.8, solver=solver).fit(X, y)
>>> np.mean(y <= reg.predict(X))
0.8
"""
_parameter_constraints: dict = {
"quantile": [Interval(Real, 0, 1, closed="neither")],
"alpha": [Interval(Real, 0, None, closed="left")],
"fit_intercept": ["boolean"],
"solver": [
StrOptions(
{
"highs-ds",
"highs-ipm",
"highs",
"interior-point",
"revised simplex",
}
),
Hidden(StrOptions({"warn"})),
],
"solver_options": [dict, None],
}
def __init__(
self,
*,
quantile=0.5,
alpha=1.0,
fit_intercept=True,
solver="warn",
solver_options=None,
):
self.quantile = quantile
self.alpha = alpha
self.fit_intercept = fit_intercept
self.solver = solver
self.solver_options = solver_options
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns self.
"""
self._validate_params()
X, y = self._validate_data(
X,
y,
accept_sparse=["csc", "csr", "coo"],
y_numeric=True,
multi_output=False,
)
sample_weight = _check_sample_weight(sample_weight, X)
n_features = X.shape[1]
n_params = n_features
if self.fit_intercept:
n_params += 1
# Note that centering y and X with _preprocess_data does not work
# for quantile regression.
# The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
# So we rescale the penalty term, which is equivalent.
alpha = np.sum(sample_weight) * self.alpha
if self.solver == "warn":
warnings.warn(
"The default solver will change from 'interior-point' to 'highs' in "
"version 1.4. Set `solver='highs'` or to the desired solver to silence "
"this warning.",
FutureWarning,
)
solver = "interior-point"
elif self.solver in (
"highs-ds",
"highs-ipm",
"highs",
) and sp_version < parse_version("1.6.0"):
raise ValueError(
f"Solver {self.solver} is only available "
f"with scipy>=1.6.0, got {sp_version}"
)
else:
solver = self.solver
if solver == "interior-point" and sp_version >= parse_version("1.11.0"):
raise ValueError(
f"Solver {solver} is not anymore available in SciPy >= 1.11.0."
)
if sparse.issparse(X) and solver not in ["highs", "highs-ds", "highs-ipm"]:
raise ValueError(
f"Solver {self.solver} does not support sparse X. "
"Use solver 'highs' for example."
)
# make default solver more stable
if self.solver_options is None and solver == "interior-point":
solver_options = {"lstsq": True}
else:
solver_options = self.solver_options
# After rescaling alpha, the minimization problem is
# min sum(pinball loss) + alpha * L1
# Use linear programming formulation of quantile regression
# min_x c x
# A_eq x = b_eq
# 0 <= x
# x = (s0, s, t0, t, u, v) = slack variables >= 0
# intercept = s0 - t0
# coef = s - t
# c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
# residual = y - X@coef - intercept = u - v
# A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
# b_eq = y
# p = n_features
# n = n_samples
# 1_n = vector of length n with entries equal one
# see https://stats.stackexchange.com/questions/384909/
#
# Filtering out zero sample weights from the beginning makes life
# easier for the linprog solver.
indices = np.nonzero(sample_weight)[0]
        n_indices = len(indices)  # number of samples with non-zero sample weight
if n_indices < len(sample_weight):
sample_weight = sample_weight[indices]
X = _safe_indexing(X, indices)
y = _safe_indexing(y, indices)
c = np.concatenate(
[
np.full(2 * n_params, fill_value=alpha),
sample_weight * self.quantile,
sample_weight * (1 - self.quantile),
]
)
if self.fit_intercept:
# do not penalize the intercept
c[0] = 0
c[n_params] = 0
if solver in ["highs", "highs-ds", "highs-ipm"]:
# Note that highs methods always use a sparse CSC memory layout internally,
# even for optimization problems parametrized using dense numpy arrays.
# Therefore, we work with CSC matrices as early as possible to limit
# unnecessary repeated memory copies.
eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
if self.fit_intercept:
ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
else:
A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
else:
eye = np.eye(n_indices)
if self.fit_intercept:
ones = np.ones((n_indices, 1))
A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
else:
A_eq = np.concatenate([X, -X, eye, -eye], axis=1)
b_eq = y
result = linprog(
c=c,
A_eq=A_eq,
b_eq=b_eq,
method=solver,
options=solver_options,
)
solution = result.x
if not result.success:
failure = {
1: "Iteration limit reached.",
2: "Problem appears to be infeasible.",
3: "Problem appears to be unbounded.",
4: "Numerical difficulties encountered.",
}
warnings.warn(
"Linear programming for QuantileRegressor did not succeed.\n"
f"Status is {result.status}: "
+ failure.setdefault(result.status, "unknown reason")
+ "\n"
+ "Result message of linprog:\n"
+ result.message,
ConvergenceWarning,
)
# positive slack - negative slack
# solution is an array with (params_pos, params_neg, u, v)
params = solution[:n_params] - solution[n_params : 2 * n_params]
self.n_iter_ = result.nit
if self.fit_intercept:
self.coef_ = params[1:]
self.intercept_ = params[0]
else:
self.coef_ = params
self.intercept_ = 0.0
return self
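# A hedged sketch (not part of the scikit-learn source): the pinball loss that
# `QuantileRegressor` minimizes (together with the L1 penalty), written out
# explicitly. For quantile q and residual r = y_true - y_pred the loss is
# q * r when r >= 0 and (q - 1) * r otherwise, i.e. max(q * r, (q - 1) * r).
def _pinball_loss(y_true, y_pred, quantile=0.5):
    residual = np.asarray(y_true) - np.asarray(y_pred)
    return np.mean(np.maximum(quantile * residual, (quantile - 1) * residual))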
| bsd-3-clause |
bzero/statsmodels | statsmodels/tsa/tests/test_adfuller_lag.py | 32 | 1882 | # -*- coding: utf-8 -*-
"""Test for autolag of adfuller, unitroot_adf
Created on Wed May 30 21:39:46 2012
Author: Josef Perktold
"""
from statsmodels.compat.python import iteritems
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import statsmodels.tsa.stattools as tsast
from statsmodels.datasets import macrodata
def test_adf_autolag():
#see issue #246
#this is mostly a unit test
d2 = macrodata.load().data
for k_trend, tr in enumerate(['nc', 'c', 'ct', 'ctt']):
#[None:'nc', 0:'c', 1:'ct', 2:'ctt']
x = np.log(d2['realgdp'])
xd = np.diff(x)
#check exog
adf3 = tsast.adfuller(x, maxlag=None, autolag='aic',
regression=tr, store=True, regresults=True)
st2 = adf3[-1]
assert_equal(len(st2.autolag_results), 15 + 1) #+1 for lagged level
for l, res in sorted(iteritems(st2.autolag_results))[:5]:
lag = l-k_trend
#assert correct design matrices in _autolag
assert_equal(res.model.exog[-10:,k_trend], x[-11:-1])
assert_equal(res.model.exog[-1,k_trend+1:], xd[-lag:-1][::-1])
#min-ic lag of dfgls in Stata is also 2, or 9 for maic with notrend
assert_equal(st2.usedlag, 2)
#same result with lag fixed at usedlag of autolag
adf2 = tsast.adfuller(x, maxlag=2, autolag=None, regression=tr)
assert_almost_equal(adf3[:2], adf2[:2], decimal=12)
tr = 'c'
#check maxlag with autolag
adf3 = tsast.adfuller(x, maxlag=5, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 5 + 1)
adf3 = tsast.adfuller(x, maxlag=0, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 0 + 1)
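#a hedged usage sketch (not part of the original test module): without
#`store`/`regresults`, `adfuller` returns a plain tuple that unpacks as below;
#`x` is any 1-d series such as the log real GDP used above.
def example_adfuller_usage(x):
    adf_stat, pvalue, usedlag, nobs, crit_values, icbest = tsast.adfuller(
        x, maxlag=None, autolag='aic', regression='c')
    return adf_stat, pvalue, usedlag, crit_values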
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/model_selection/tests/test_search.py | 3 | 47092 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import in1d
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
predict_log_proba = predict
decision_function = predict
transform = predict
inverse_transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
    # Make sure the predict/transform etc. functions raise a meaningful error message
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
        assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
                        if k != 'rank_test_score'))
        assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
                        if 'time' not in k and
                        k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
                    if cv_results['param_kernel'][i] == 'poly'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# scipy.stats dists now supports `seed` but we still support scipy 0.12
# which doesn't support the seed. Hence the assertions in the test for
# random_search alone should not depend on randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time in windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
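# Illustrative sketch (not one of the original tests): drawing a handful of
# candidate settings from a mix of a scipy distribution and a plain list, the
# same kind of search space exercised in test_parameters_sampler_replacement
# above. The helper name and parameter values are made up for illustration.
def _demo_parameter_sampler_usage():
    from scipy.stats import uniform
    from sklearn.model_selection import ParameterSampler
    param_distributions = {'C': uniform(0.1, 10), 'kernel': ['rbf', 'linear']}
    sampler = ParameterSampler(param_distributions, n_iter=4, random_state=0)
    # Each element is a dict such as {'C': 5.58..., 'kernel': 'rbf'}
    return list(sampler)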
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
    # OneTimeSplitter is a non-re-entrant cv where split can be called only
    # once. If ``cv.split`` were called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameter settings would not be
    # evaluated, as no train/test indices would be generated for the 2nd and
    # subsequent cv.split calls.
    # This is a check to make sure cv.split is not called once per param
    # setting.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same, as long as the same train/test indices are generated for all
    # the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
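# Illustrative sketch (not one of the original tests): the "one time iterable"
# idea checked above, expressed with public API only. A pre-materialised list
# of (train, test) index pairs is passed as ``cv``; GridSearchCV then reuses
# the same splits for every candidate instead of re-splitting per parameter.
def _demo_precomputed_splits_as_cv():
    X, y = make_classification(n_samples=100, random_state=0)
    splits = list(KFold(n_splits=5).split(X, y))
    gs = GridSearchCV(LinearSVC(random_state=0),
                      param_grid={'C': [0.1, 0.2, 0.3]},
                      cv=splits)
    gs.fit(X, y)
    return gs.cv_results_['mean_test_score']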
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/manifold/tests/test_mds.py | 94 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
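# Illustrative usage sketch (not one of the original tests): embedding the same
# precomputed dissimilarity matrix with metric MDS and inspecting the result.
# The helper name is made up for illustration.
def _demo_mds_embedding():
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    mds_est = mds.MDS(n_components=2, dissimilarity="precomputed",
                      random_state=42)
    coords = mds_est.fit_transform(sim)
    # one 2-D coordinate per object, plus the final value of the stress
    return coords, mds_est.stress_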
| bsd-3-clause |
h2oai/h2o-3 | h2o-py/tests/testdir_sklearn/pyunit_sklearn_regression_pipeline.py | 2 | 6383 | from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import h2o
from h2o.sklearn import H2OGradientBoostingEstimator, H2OGradientBoostingRegressor, H2OScaler, H2OPCA
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them either with H2O frames (more efficient and compatible with the old API),
or with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
scores = {}
def _get_data(format='numpy'):
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=seed)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed)
data = ns(X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
if format == 'h2o':
for k, v in data.__dict__.items():
setattr(data, k, h2o.H2OFrame(v))
return data
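# Condensed illustration of the module docstring above (not part of the test
# run): an sklearn pipeline mixing sklearn preprocessing with an H2O estimator,
# fed plain numpy arrays. The helper name is made up; `init_connection_args`
# is only needed outside release builds, as noted in the tests below.
def _example_mixed_pipeline():
    pipeline = Pipeline([
        ('standardize', StandardScaler()),
        ('estimator', H2OGradientBoostingRegressor(seed=seed,
                                                   init_connection_args=init_connection_args)),
    ])
    data = _get_data(format='numpy')
    pipeline.fit(data.X_train, data.y_train)
    return pipeline.score(data.X_test, data.y_test)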
def test_h2o_only_pipeline_with_h2o_frames():
pipeline = Pipeline([
('standardize', H2OScaler()),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingRegressor(seed=seed))
])
data = _get_data(format='h2o')
assert isinstance(data.X_train, h2o.H2OFrame)
pipeline.fit(data.X_train, data.y_train)
preds = pipeline.predict(data.X_test)
assert isinstance(preds, h2o.H2OFrame)
assert preds.dim == [len(data.X_test), 1]
# to get it working, we need to score a fresh H2OFrame
data = _get_data(format='h2o')
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test.as_data_frame().values, preds.as_data_frame().values)
assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
scores['h2o_only_pipeline_with_h2o_frame'] = score
def test_h2o_only_pipeline_with_numpy_arrays():
# Note that in normal situations (release build), init_connection_args can be omitted
# otherwise, it should be set to the first H2O element in the pipeline.
    # Also note that in this specific case, mixing numpy inputs with a fully H2O pipeline,
# the last estimator requires the `data_conversion=True` param in order to return numpy arrays in predictions.
pipeline = Pipeline([
('standardize', H2OScaler(init_connection_args=init_connection_args)),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingRegressor(seed=seed, data_conversion=True))
])
data = _get_data(format='numpy')
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6
scores['h2o_only_pipeline_with_numpy_arrays'] = score
def test_mixed_pipeline_with_numpy_arrays():
# Note that in normal situations (release build), init_connection_args can be omitted
# otherwise, it should be set to the first H2O element in the pipeline
pipeline = Pipeline([
('standardize', StandardScaler()),
('pca', PCA(n_components=2, random_state=seed)),
('estimator', H2OGradientBoostingRegressor(seed=seed, init_connection_args=init_connection_args))
])
data = _get_data(format='numpy')
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6
scores['mixed_pipeline_with_numpy_arrays'] = score
def test_generic_estimator_with_distribution_param():
# Note that in normal situations (release build), init_connection_args can be omitted
# otherwise, it should be set to the first H2O element in the pipeline
pipeline = Pipeline([
('standardize', StandardScaler()),
('pca', PCA(n_components=2, random_state=seed)),
('estimator', H2OGradientBoostingEstimator(distribution='gaussian', seed=seed, init_connection_args=init_connection_args))
])
data = _get_data(format='numpy')
assert isinstance(data.X_train, np.ndarray)
pipeline.fit(data.X_train, data.y_train)
preds = pipeline.predict(data.X_test)
assert isinstance(preds, np.ndarray)
assert preds.shape == (len(data.X_test),)
score = pipeline.score(data.X_test, data.y_test)
assert isinstance(score, float)
skl_score = r2_score(data.y_test, preds)
assert abs(score - skl_score) < 1e-6
scores['generic_estimator_with_distribution_param'] = score
def _assert_test_scores_equivalent(lk, rk):
if lk in scores and rk in scores:
        assert abs(scores[lk] - scores[rk]) < 1e-6, \
"expected equivalent scores but got {lk}={lscore} and {rk}={rscore}" \
.format(lk=lk, rk=rk, lscore=scores[lk], rscore=scores[rk])
elif lk not in scores:
print("no scores for {}".format(lk))
else:
print("no scores for {}".format(rk))
def test_scores_are_equivalent():
_assert_test_scores_equivalent('h2o_only_pipeline_with_h2o_frame', 'h2o_only_pipeline_with_numpy_arrays')
_assert_test_scores_equivalent('mixed_pipeline_with_numpy_arrays', 'generic_estimator_with_distribution_param')
pyunit_utils.run_tests([
test_h2o_only_pipeline_with_h2o_frames,
test_h2o_only_pipeline_with_numpy_arrays,
test_mixed_pipeline_with_numpy_arrays,
test_generic_estimator_with_distribution_param,
test_scores_are_equivalent,
])
| apache-2.0 |
espg/scikit-learn | sklearn/preprocessing/_function_transformer.py | 4 | 12151 | import warnings
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.metaestimators import available_if
from ..utils.validation import (
_allclose_dense_sparse,
_check_feature_names_in,
check_array,
)
from ..utils._param_validation import StrOptions
def _identity(X):
"""The identity function."""
return X
class FunctionTransformer(TransformerMixin, BaseEstimator):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <function_transformer>`.
Parameters
----------
func : callable, default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
validate : bool, default=False
Indicate that the input X array should be checked before calling
``func``. The possibilities are:
- If False, there is no input validation.
- If True, then X will be converted to a 2-dimensional NumPy array or
sparse matrix. If the conversion is not possible an exception is
raised.
.. versionchanged:: 0.22
The default of ``validate`` changed from True to False.
accept_sparse : bool, default=False
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
the original inputs. It can be used for a sanity check, raising a
warning when the condition is not fulfilled.
.. versionadded:: 0.20
feature_names_out : callable, 'one-to-one' or None, default=None
Determines the list of feature names that will be returned by the
`get_feature_names_out` method. If it is 'one-to-one', then the output
feature names will be equal to the input feature names. If it is a
callable, then it must take two positional arguments: this
`FunctionTransformer` (`self`) and an array-like of input feature names
(`input_features`). It must return an array-like of output feature
names. The `get_feature_names_out` method is only defined if
`feature_names_out` is not None.
See ``get_feature_names_out`` for more details.
.. versionadded:: 1.1
kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to func.
.. versionadded:: 0.18
inv_kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to inverse_func.
.. versionadded:: 0.18
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 1.0
See Also
--------
MaxAbsScaler : Scale each feature by its maximum absolute value.
StandardScaler : Standardize features by removing the mean and
scaling to unit variance.
LabelBinarizer : Binarize labels in a one-vs-all fashion.
MultiLabelBinarizer : Transform between iterable of iterables
and a multilabel format.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import FunctionTransformer
>>> transformer = FunctionTransformer(np.log1p)
>>> X = np.array([[0, 1], [2, 3]])
>>> transformer.transform(X)
array([[0. , 0.6931...],
[1.0986..., 1.3862...]])
"""
_parameter_constraints: dict = {
"func": [callable, None],
"inverse_func": [callable, None],
"validate": ["boolean"],
"accept_sparse": ["boolean"],
"check_inverse": ["boolean"],
"feature_names_out": [callable, StrOptions({"one-to-one"}), None],
"kw_args": [dict, None],
"inv_kw_args": [dict, None],
}
def __init__(
self,
func=None,
inverse_func=None,
*,
validate=False,
accept_sparse=False,
check_inverse=True,
feature_names_out=None,
kw_args=None,
inv_kw_args=None,
):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.check_inverse = check_inverse
self.feature_names_out = feature_names_out
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def _check_input(self, X, *, reset):
if self.validate:
return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
elif reset:
# Set feature_names_in_ and n_features_in_ even if validate=False
# We run this only when reset==True to store the attributes but not
# validate them, because validate=False
self._check_n_features(X, reset=reset)
self._check_feature_names(X, reset=reset)
return X
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
if not np.issubdtype(X.dtype, np.number):
raise ValueError(
"'check_inverse' is only supported when all the elements in `X` is"
" numerical."
)
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn(
"The provided functions are not strictly"
" inverse of each other. If you are sure you"
" want to proceed regardless, set"
" 'check_inverse=False'.",
UserWarning,
)
def fit(self, X, y=None):
"""Fit transformer by checking X.
If ``validate`` is ``True``, ``X`` will be checked.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
FunctionTransformer class instance.
"""
self._validate_params()
X = self._check_input(X, reset=True)
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(X)
return self
def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
X = self._check_input(X, reset=False)
return self._transform(X, func=self.func, kw_args=self.kw_args)
def inverse_transform(self, X):
"""Transform X using the inverse function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
if self.validate:
X = check_array(X, accept_sparse=self.accept_sparse)
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
@available_if(lambda self: self.feature_names_out is not None)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
This method is only defined if `feature_names_out` is not None.
Parameters
----------
input_features : array-like of str or None, default=None
Input feature names.
- If `input_features` is None, then `feature_names_in_` is
used as the input feature names. If `feature_names_in_` is not
defined, then names are generated:
`[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
- If `feature_names_out` is 'one-to-one', the input feature names
are returned (see `input_features` above). This requires
`feature_names_in_` and/or `n_features_in_` to be defined, which
is done automatically if `validate=True`. Alternatively, you can
set them in `func`.
- If `feature_names_out` is a callable, then it is called with two
arguments, `self` and `input_features`, and its return value is
returned by this method.
"""
if hasattr(self, "n_features_in_") or input_features is not None:
input_features = _check_feature_names_in(self, input_features)
if self.feature_names_out == "one-to-one":
names_out = input_features
elif callable(self.feature_names_out):
names_out = self.feature_names_out(self, input_features)
else:
raise ValueError(
f"feature_names_out={self.feature_names_out!r} is invalid. "
'It must either be "one-to-one" or a callable with two '
"arguments: the function transformer and an array-like of "
"input feature names. The callable must return an array-like "
"of output feature names."
)
return np.asarray(names_out, dtype=object)
def _transform(self, X, func=None, kw_args=None):
if func is None:
func = _identity
return func(X, **(kw_args if kw_args else {}))
def __sklearn_is_fitted__(self):
"""Return True since FunctionTransfomer is stateless."""
return True
def _more_tags(self):
return {"no_validation": not self.validate, "stateless": True}
def set_output(self, *, transform=None):
"""Set output container.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
Parameters
----------
transform : {"default", "pandas"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
Returns
-------
self : estimator instance
Estimator instance.
"""
if hasattr(super(), "set_output"):
return super().set_output(transform=transform)
if transform == "pandas" and self.feature_names_out is None:
warnings.warn(
'With transform="pandas", `func` should return a DataFrame to follow'
" the set_output API."
)
return self
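# Minimal usage sketch (illustrative only, not part of the scikit-learn
# module): a log/exp transformer pair with the inverse check enabled and
# one-to-one feature-name propagation, guarded so importing this module is
# unaffected. Variable names are made up for illustration.
if __name__ == "__main__":
    _log_tf = FunctionTransformer(
        func=np.log1p,
        inverse_func=np.expm1,
        check_inverse=True,  # warn if the two callables are not inverses
        feature_names_out="one-to-one",
    )
    _X_demo = np.array([[0.0, 1.0], [2.0, 3.0]])
    _X_log = _log_tf.fit_transform(_X_demo)
    print(_X_log)
    print(_log_tf.inverse_transform(_X_log))  # recovers _X_demo
    print(_log_tf.get_feature_names_out(["a", "b"]))  # ['a' 'b']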
| bsd-3-clause |
gheinrich/DIGITS-GAN | digits/dataset/images/generic/test_views.py | 1 | 9272 | # Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import json
import os
import tempfile
from bs4 import BeautifulSoup
from .test_lmdb_creator import create_lmdbs
from digits import test_utils
import digits.test_views
# May be too short on a slow system
TIMEOUT_DATASET = 45
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
@classmethod
def dataset_exists(cls, job_id):
return cls.job_exists(job_id, 'datasets')
@classmethod
def dataset_status(cls, job_id):
return cls.job_status(job_id, 'datasets')
@classmethod
def abort_dataset(cls, job_id):
return cls.abort_job(job_id, job_type='datasets')
@classmethod
def dataset_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'datasets'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_DATASET
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_dataset(cls, job_id):
return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithImageset(BaseViewsTest):
"""
Provides some LMDBs and some functions
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithImageset, cls).setUpClass()
if not hasattr(BaseViewsTestWithImageset, 'imageset_folder'):
# Create folder and LMDBs for all test classes
BaseViewsTestWithImageset.imageset_folder = tempfile.mkdtemp()
BaseViewsTestWithImageset.test_image = create_lmdbs(BaseViewsTestWithImageset.imageset_folder)
BaseViewsTestWithImageset.val_db_path = os.path.join(
BaseViewsTestWithImageset.imageset_folder,
'val_images')
cls.created_datasets = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
super(BaseViewsTestWithImageset, cls).tearDownClass()
@classmethod
def create_dataset(cls, **kwargs):
"""
Create a dataset
Returns the job_id
Raises RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
data = {
'dataset_name': 'test_dataset',
'group_name': 'test_group',
'method': 'prebuilt',
'prebuilt_train_images': os.path.join(cls.imageset_folder, 'train_images'),
'prebuilt_train_labels': os.path.join(cls.imageset_folder, 'train_labels'),
'prebuilt_val_images': os.path.join(cls.imageset_folder, 'val_images'),
'prebuilt_val_labels': os.path.join(cls.imageset_folder, 'val_labels'),
'prebuilt_mean_file': os.path.join(cls.imageset_folder, 'train_mean.binaryproto'),
}
data.update(kwargs)
request_json = data.pop('json', False)
url = '/datasets/images/generic'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
                raise RuntimeError('Dataset creation failed with %s' % rv.status_code)
return json.loads(rv.data)['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
cls.created_datasets.append(job_id)
return job_id
class BaseViewsTestWithDataset(BaseViewsTestWithImageset):
"""
Provides a dataset and some functions
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.dataset_id = cls.create_dataset(json=True)
assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest, test_utils.DatasetMixin):
"""
Tests which don't require an imageset or a dataset
"""
def test_page_dataset_new(self):
rv = self.app.get('/datasets/images/generic/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Dataset' in rv.data, 'unexpected page format'
def test_nonexistent_dataset(self):
assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class TestCreation(BaseViewsTestWithImageset, test_utils.DatasetMixin):
"""
Dataset creation tests
"""
def test_bad_path(self):
try:
self.create_dataset(
prebuilt_train_images='/not-a-directory'
)
except RuntimeError:
return
raise AssertionError('Should have failed')
def test_create_json(self):
job_id = self.create_dataset(json=True)
self.abort_dataset(job_id)
def test_create_delete(self):
job_id = self.create_dataset()
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_abort_delete(self):
job_id = self.create_dataset()
assert self.abort_dataset(job_id) == 200, 'abort failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_wait_delete(self):
job_id = self.create_dataset()
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_no_force_same_shape(self):
job_id = self.create_dataset(force_same_shape=0)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
def test_clone(self):
options_1 = {
'resize_channels': '1',
}
job1_id = self.create_dataset(**options_1)
assert self.dataset_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/datasets/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
# Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_dataset(**options_2)
assert self.dataset_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/datasets/%s.json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
# These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class TestCreated(BaseViewsTestWithDataset, test_utils.DatasetMixin):
"""
Tests on a dataset that has already been created
"""
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for d in content['datasets']:
if d['id'] == self.dataset_id:
found = True
break
assert found, 'dataset not found in list'
def test_dataset_json(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.dataset_id, 'expected different job_id'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
rv = self.app.get('/datasets/summary?job_id=%s' % self.dataset_id)
assert rv.status_code == 200
assert 'new name' in rv.data
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
| bsd-3-clause |
espg/scikit-learn | sklearn/impute/tests/test_impute.py | 7 | 54036 | import pytest
import warnings
import numpy as np
from scipy import sparse
from scipy.stats import kstest
import io
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
# make IterativeImputer available
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.datasets import load_diabetes
from sklearn.impute import MissingIndicator
from sklearn.impute import SimpleImputer, IterativeImputer
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import BayesianRidge, ARDRegression, RidgeCV
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_union
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import _sparse_random_matrix
from sklearn.exceptions import ConvergenceWarning
from sklearn.impute._base import _most_frequent
def _assert_array_equal_and_same_dtype(x, y):
assert_array_equal(x, y)
assert x.dtype == y.dtype
def _assert_allclose_and_same_dtype(x, y):
assert_allclose(x, y)
assert x.dtype == y.dtype
def _check_statistics(X, X_true, strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % (
strategy,
missing_values,
)
assert_ae = assert_array_equal
if X.dtype.kind == "f" or X_true.dtype.kind == "f":
assert_ae = assert_array_almost_equal
# Normal matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
X_trans = imputer.fit(X).transform(X.copy())
assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
# Sparse matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
def test_imputation_shape(strategy):
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
imputer = SimpleImputer(strategy=strategy)
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert X_imputed.shape == (10, 2)
X_imputed = imputer.fit_transform(X)
assert X_imputed.shape == (10, 2)
iterative_imputer = IterativeImputer(initial_strategy=strategy)
X_imputed = iterative_imputer.fit_transform(X)
assert X_imputed.shape == (10, 2)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_imputation_deletion_warning(strategy):
X = np.ones((3, 5))
X[:, 0] = np.nan
imputer = SimpleImputer(strategy=strategy, verbose=1)
# TODO: Remove in 1.3
with pytest.warns(FutureWarning, match="The 'verbose' parameter"):
imputer.fit(X)
with pytest.warns(UserWarning, match="Skipping"):
imputer.transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_imputation_deletion_warning_feature_names(strategy):
pd = pytest.importorskip("pandas")
missing_values = np.nan
feature_names = np.array(["a", "b", "c", "d"], dtype=object)
X = pd.DataFrame(
[
[missing_values, missing_values, 1, missing_values],
[4, missing_values, 2, 10],
],
columns=feature_names,
)
imputer = SimpleImputer(strategy=strategy, verbose=1)
# TODO: Remove in 1.3
with pytest.warns(FutureWarning, match="The 'verbose' parameter"):
imputer.fit(X)
# check SimpleImputer returning feature name attribute correctly
assert_array_equal(imputer.feature_names_in_, feature_names)
# ensure that skipped feature warning includes feature name
with pytest.warns(
UserWarning, match=r"Skipping features without any observed values: \['b'\]"
):
imputer.transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent", "constant"])
def test_imputation_error_sparse_0(strategy):
# check that error are raised when missing_values = 0 and input is sparse
X = np.ones((3, 5))
X[0] = 0
X = sparse.csc_matrix(X)
imputer = SimpleImputer(strategy=strategy, missing_values=0)
with pytest.raises(ValueError, match="Provide a dense array"):
imputer.fit(X)
imputer.fit(X.toarray())
with pytest.raises(ValueError, match="Provide a dense array"):
imputer.transform(X)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, "size") else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, "size") else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = -values[4::2]
tests = [
("mean", np.nan, lambda z, v, p: safe_mean(np.hstack((z, v)))),
("median", np.nan, lambda z, v, p: safe_median(np.hstack((z, v)))),
]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
# XXX unreached code as of v0.22
X_true[:, j] = np.hstack(
(v, np.repeat(true_statistics[j], nb_missing_values + nb_zeros))
)
else:
X_true[:, j] = np.hstack(
(v, z, np.repeat(true_statistics[j], nb_missing_values))
)
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy, true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array(
[
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]
).transpose()
X_imputed_median = np.array(
[
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, 0.5],
]
).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, 0.5]
_check_statistics(X, X_imputed_median, "median", statistics_median, np.nan)
@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("dtype", [None, object, str])
def test_imputation_mean_median_error_invalid_type(strategy, dtype):
X = np.array([["a", "b", 3], [4, "e", 6], ["g", "h", 9]], dtype=dtype)
msg = "non-numeric data:\ncould not convert string to float: '"
with pytest.raises(ValueError, match=msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["mean", "median"])
@pytest.mark.parametrize("type", ["list", "dataframe"])
def test_imputation_mean_median_error_invalid_type_list_pandas(strategy, type):
X = [["a", "b", 3], [4, "e", 6], ["g", "h", 9]]
if type == "dataframe":
pd = pytest.importorskip("pandas")
X = pd.DataFrame(X)
msg = "non-numeric data:\ncould not convert string to float: '"
with pytest.raises(ValueError, match=msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit_transform(X)
@pytest.mark.parametrize("strategy", ["constant", "most_frequent"])
@pytest.mark.parametrize("dtype", [str, np.dtype("U"), np.dtype("S")])
def test_imputation_const_mostf_error_invalid_types(strategy, dtype):
# Test imputation on non-numeric data using "most_frequent" and "constant"
# strategy
X = np.array(
[
[np.nan, np.nan, "a", "f"],
[np.nan, "c", np.nan, "d"],
[np.nan, "b", "d", np.nan],
[np.nan, "c", "d", "h"],
],
dtype=dtype,
)
err_msg = "SimpleImputer does not support data"
with pytest.raises(ValueError, match=err_msg):
imputer = SimpleImputer(strategy=strategy)
imputer.fit(X).transform(X)
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array(
[
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
]
)
X_true = np.array(
[
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
]
)
# scipy.stats.mode, used in SimpleImputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, SimpleImputer will need to be
# updated to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
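# Illustrative sketch (not one of the original tests): the tie-breaking
# behaviour described in the comment above -- with 2.0 and 3.0 equally
# frequent, the missing entry is currently imputed with the smaller value.
# The helper name is made up for illustration.
def _demo_most_frequent_tie_breaking():
    X = np.array([[np.nan], [2.0], [3.0], [2.0], [3.0]])
    imputer = SimpleImputer(strategy="most_frequent")
    return imputer.fit_transform(X)  # the NaN becomes 2.0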
@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_most_frequent_objects(marker):
# Test imputation using the most-frequent strategy.
X = np.array(
[
[marker, marker, "a", "f"],
[marker, "c", marker, "d"],
[marker, "b", "d", marker],
[marker, "c", "d", "h"],
],
dtype=object,
)
X_true = np.array(
[
["c", "a", "f"],
["c", "d", "d"],
["b", "d", "d"],
["c", "d", "h"],
],
dtype=object,
)
imputer = SimpleImputer(missing_values=marker, strategy="most_frequent")
X_trans = imputer.fit(X).transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_most_frequent_pandas(dtype):
# Test imputation using the most frequent strategy on pandas df
pd = pytest.importorskip("pandas")
f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
df = pd.read_csv(f, dtype=dtype)
X_true = np.array(
[["a", "i", "x"], ["a", "j", "y"], ["a", "j", "x"], ["b", "j", "x"]],
dtype=object,
)
imputer = SimpleImputer(strategy="most_frequent")
X_trans = imputer.fit_transform(df)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("X_data, missing_value", [(1, 0), (1.0, np.nan)])
def test_imputation_constant_error_invalid_type(X_data, missing_value):
# Verify that exceptions are raised on invalid fill_value type
X = np.full((3, 5), X_data, dtype=float)
X[0, 0] = missing_value
with pytest.raises(ValueError, match="imputing numerical"):
imputer = SimpleImputer(
missing_values=missing_value, strategy="constant", fill_value="x"
)
imputer.fit_transform(X)
def test_imputation_constant_integer():
# Test imputation using the constant strategy on integers
X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
X_true = np.array([[0, 2, 3, 0], [4, 0, 5, 0], [6, 7, 0, 0], [8, 9, 0, 0]])
imputer = SimpleImputer(missing_values=-1, strategy="constant", fill_value=0)
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("array_constructor", [sparse.csr_matrix, np.asarray])
def test_imputation_constant_float(array_constructor):
# Test imputation using the constant strategy on floats
X = np.array(
[
[np.nan, 1.1, 0, np.nan],
[1.2, np.nan, 1.3, np.nan],
[0, 0, np.nan, np.nan],
[1.4, 1.5, 0, np.nan],
]
)
X_true = np.array(
[[-1, 1.1, 0, -1], [1.2, -1, 1.3, -1], [0, 0, -1, -1], [1.4, 1.5, 0, -1]]
)
X = array_constructor(X)
X_true = array_constructor(X_true)
imputer = SimpleImputer(strategy="constant", fill_value=-1)
X_trans = imputer.fit_transform(X)
assert_allclose_dense_sparse(X_trans, X_true)
@pytest.mark.parametrize("marker", [None, np.nan, "NAN", "", 0])
def test_imputation_constant_object(marker):
# Test imputation using the constant strategy on objects
X = np.array(
[
[marker, "a", "b", marker],
["c", marker, "d", marker],
["e", "f", marker, marker],
["g", "h", "i", marker],
],
dtype=object,
)
X_true = np.array(
[
["missing", "a", "b", "missing"],
["c", "missing", "d", "missing"],
["e", "f", "missing", "missing"],
["g", "h", "i", "missing"],
],
dtype=object,
)
imputer = SimpleImputer(
missing_values=marker, strategy="constant", fill_value="missing"
)
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("dtype", [object, "category"])
def test_imputation_constant_pandas(dtype):
# Test imputation using the constant strategy on pandas df
pd = pytest.importorskip("pandas")
f = io.StringIO("Cat1,Cat2,Cat3,Cat4\n,i,x,\na,,y,\na,j,,\nb,j,x,")
df = pd.read_csv(f, dtype=dtype)
X_true = np.array(
[
["missing_value", "i", "x", "missing_value"],
["a", "missing_value", "y", "missing_value"],
["a", "j", "missing_value", "missing_value"],
["b", "j", "x", "missing_value"],
],
dtype=object,
)
imputer = SimpleImputer(strategy="constant")
X_trans = imputer.fit_transform(df)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize("X", [[[1], [2]], [[1], [np.nan]]])
def test_iterative_imputer_one_feature(X):
# check we exit early when there is a single feature
imputer = IterativeImputer().fit(X)
assert imputer.n_iter_ == 0
imputer = IterativeImputer()
imputer.fit([[1], [2]])
assert imputer.n_iter_ == 0
imputer.fit([[1], [np.nan]])
assert imputer.n_iter_ == 0
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
X = _sparse_random_matrix(100, 100, density=0.10)
missing_values = X.data[0]
pipeline = Pipeline(
[
("imputer", SimpleImputer(missing_values=missing_values)),
("tree", tree.DecisionTreeRegressor(random_state=0)),
]
)
parameters = {"imputer__strategy": ["mean", "median", "most_frequent"]}
Y = _sparse_random_matrix(100, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_copy():
# Test imputation with copy
X_orig = _sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = SimpleImputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert not np.all(X == Xt)
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert not np.all(X.data == Xt.data)
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = SimpleImputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_almost_equal(X, Xt)
# copy=False, sparse csc => no copy
X = X_orig.copy().tocsc()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_almost_equal(X.data, Xt.data)
# copy=False, sparse csr => copy
X = X_orig.copy()
imputer = SimpleImputer(missing_values=X.data[0], strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert not np.all(X.data == Xt.data)
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
def test_iterative_imputer_zero_iters():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
missing_flag = X == 0
X[missing_flag] = np.nan
imputer = IterativeImputer(max_iter=0)
X_imputed = imputer.fit_transform(X)
# with max_iter=0, only initial imputation is performed
assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
# repeat but force n_iter_ to 0
imputer = IterativeImputer(max_iter=5).fit(X)
# transformed should not be equal to initial imputation
assert not np.all(imputer.transform(X) == imputer.initial_imputer_.transform(X))
imputer.n_iter_ = 0
# now they should be equal as only initial imputation is done
assert_allclose(imputer.transform(X), imputer.initial_imputer_.transform(X))
def test_iterative_imputer_verbose():
rng = np.random.RandomState(0)
n = 100
d = 3
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=1)
imputer.fit(X)
imputer.transform(X)
imputer = IterativeImputer(missing_values=0, max_iter=1, verbose=2)
imputer.fit(X)
imputer.transform(X)
def test_iterative_imputer_all_missing():
n = 100
d = 3
X = np.zeros((n, d))
imputer = IterativeImputer(missing_values=0, max_iter=1)
X_imputed = imputer.fit_transform(X)
assert_allclose(X_imputed, imputer.initial_imputer_.transform(X))
@pytest.mark.parametrize(
"imputation_order", ["random", "roman", "ascending", "descending", "arabic"]
)
def test_iterative_imputer_imputation_order(imputation_order):
rng = np.random.RandomState(0)
n = 100
d = 10
max_iter = 2
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
X[:, 0] = 1 # this column should not be discarded by IterativeImputer
imputer = IterativeImputer(
missing_values=0,
max_iter=max_iter,
n_nearest_features=5,
sample_posterior=False,
skip_complete=True,
min_value=0,
max_value=1,
verbose=1,
imputation_order=imputation_order,
random_state=rng,
)
imputer.fit_transform(X)
ordered_idx = [i.feat_idx for i in imputer.imputation_sequence_]
assert len(ordered_idx) // imputer.n_iter_ == imputer.n_features_with_missing_
if imputation_order == "roman":
assert np.all(ordered_idx[: d - 1] == np.arange(1, d))
elif imputation_order == "arabic":
assert np.all(ordered_idx[: d - 1] == np.arange(d - 1, 0, -1))
elif imputation_order == "random":
ordered_idx_round_1 = ordered_idx[: d - 1]
ordered_idx_round_2 = ordered_idx[d - 1 :]
assert ordered_idx_round_1 != ordered_idx_round_2
elif "ending" in imputation_order:
assert len(ordered_idx) == max_iter * (d - 1)
@pytest.mark.parametrize(
"estimator", [None, DummyRegressor(), BayesianRidge(), ARDRegression(), RidgeCV()]
)
def test_iterative_imputer_estimators(estimator):
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
imputer = IterativeImputer(
missing_values=0, max_iter=1, estimator=estimator, random_state=rng
)
imputer.fit_transform(X)
# check that types are correct for estimators
hashes = []
for triplet in imputer.imputation_sequence_:
expected_type = (
type(estimator) if estimator is not None else type(BayesianRidge())
)
assert isinstance(triplet.estimator, expected_type)
hashes.append(id(triplet.estimator))
# check that each estimator is unique
assert len(set(hashes)) == len(hashes)
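# Illustrative sketch (not one of the original tests): plugging a non-default
# estimator into IterativeImputer, the pattern exercised above. The helper
# name and the tree regressor are just example choices.
def _demo_iterative_imputer_custom_estimator():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 4)
    X[rng.rand(50, 4) < 0.2] = np.nan
    imputer = IterativeImputer(
        estimator=tree.DecisionTreeRegressor(max_depth=3, random_state=0),
        max_iter=5,
        random_state=0,
    )
    return imputer.fit_transform(X)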
def test_iterative_imputer_clip():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
imputer = IterativeImputer(
missing_values=0, max_iter=1, min_value=0.1, max_value=0.2, random_state=rng
)
Xt = imputer.fit_transform(X)
assert_allclose(np.min(Xt[X == 0]), 0.1)
assert_allclose(np.max(Xt[X == 0]), 0.2)
assert_allclose(Xt[X != 0], X[X != 0])
def test_iterative_imputer_clip_truncnorm():
rng = np.random.RandomState(0)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()
X[:, 0] = 1
imputer = IterativeImputer(
missing_values=0,
max_iter=2,
n_nearest_features=5,
sample_posterior=True,
min_value=0.1,
max_value=0.2,
verbose=1,
imputation_order="random",
random_state=rng,
)
Xt = imputer.fit_transform(X)
assert_allclose(np.min(Xt[X == 0]), 0.1)
assert_allclose(np.max(Xt[X == 0]), 0.2)
assert_allclose(Xt[X != 0], X[X != 0])
def test_iterative_imputer_truncated_normal_posterior():
# test that the values that are imputed using `sample_posterior=True`
# with boundaries (`min_value` and `max_value` are not None) are drawn
# from a distribution that looks gaussian via the Kolmogorov Smirnov test.
# note that starting from the wrong random seed will make this test fail
# because random sampling doesn't occur at all when the imputation
# is outside of the (min_value, max_value) range
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
X[0][0] = np.nan
imputer = IterativeImputer(
min_value=0, max_value=0.5, sample_posterior=True, random_state=rng
)
imputer.fit_transform(X)
# generate multiple imputations for the single missing value
imputations = np.array([imputer.transform(X)[0][0] for _ in range(100)])
assert all(imputations >= 0)
assert all(imputations <= 0.5)
mu, sigma = imputations.mean(), imputations.std()
    if sigma == 0:
        sigma += 1e-12  # avoid dividing by a zero standard deviation
    ks_statistic, p_value = kstest((imputations - mu) / sigma, "norm")
# we want to fail to reject null hypothesis
# null hypothesis: distributions are the same
    assert (
        ks_statistic < 0.2 or p_value > 0.1
    ), "The posterior does not appear to be normal"
@pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"])
def test_iterative_imputer_missing_at_transform(strategy):
rng = np.random.RandomState(0)
n = 100
d = 10
X_train = rng.randint(low=0, high=3, size=(n, d))
X_test = rng.randint(low=0, high=3, size=(n, d))
X_train[:, 0] = 1 # definitely no missing values in 0th column
X_test[0, 0] = 0 # definitely missing value in 0th column
imputer = IterativeImputer(
missing_values=0, max_iter=1, initial_strategy=strategy, random_state=rng
).fit(X_train)
initial_imputer = SimpleImputer(missing_values=0, strategy=strategy).fit(X_train)
# if there were no missing values at time of fit, then imputer will
# only use the initial imputer for that feature at transform
assert_allclose(
imputer.transform(X_test)[:, 0], initial_imputer.transform(X_test)[:, 0]
)
def test_iterative_imputer_transform_stochasticity():
rng1 = np.random.RandomState(0)
rng2 = np.random.RandomState(1)
n = 100
d = 10
X = _sparse_random_matrix(n, d, density=0.10, random_state=rng1).toarray()
# when sample_posterior=True, two transforms shouldn't be equal
imputer = IterativeImputer(
missing_values=0, max_iter=1, sample_posterior=True, random_state=rng1
)
imputer.fit(X)
X_fitted_1 = imputer.transform(X)
X_fitted_2 = imputer.transform(X)
# sufficient to assert that the means are not the same
assert np.mean(X_fitted_1) != pytest.approx(np.mean(X_fitted_2))
# when sample_posterior=False, and n_nearest_features=None
# and imputation_order is not random
# the two transforms should be identical even if rng are different
imputer1 = IterativeImputer(
missing_values=0,
max_iter=1,
sample_posterior=False,
n_nearest_features=None,
imputation_order="ascending",
random_state=rng1,
)
imputer2 = IterativeImputer(
missing_values=0,
max_iter=1,
sample_posterior=False,
n_nearest_features=None,
imputation_order="ascending",
random_state=rng2,
)
imputer1.fit(X)
imputer2.fit(X)
X_fitted_1a = imputer1.transform(X)
X_fitted_1b = imputer1.transform(X)
X_fitted_2 = imputer2.transform(X)
assert_allclose(X_fitted_1a, X_fitted_1b)
assert_allclose(X_fitted_1a, X_fitted_2)
def test_iterative_imputer_no_missing():
rng = np.random.RandomState(0)
X = rng.rand(100, 100)
X[:, 0] = np.nan
m1 = IterativeImputer(max_iter=10, random_state=rng)
m2 = IterativeImputer(max_iter=10, random_state=rng)
pred1 = m1.fit(X).transform(X)
pred2 = m2.fit_transform(X)
# should exclude the first column entirely
assert_allclose(X[:, 1:], pred1)
# fit and fit_transform should both be identical
assert_allclose(pred1, pred2)
def test_iterative_imputer_rank_one():
rng = np.random.RandomState(0)
d = 50
A = rng.rand(d, 1)
B = rng.rand(1, d)
X = np.dot(A, B)
nan_mask = rng.rand(d, d) < 0.5
X_missing = X.copy()
X_missing[nan_mask] = np.nan
imputer = IterativeImputer(max_iter=5, verbose=1, random_state=rng)
X_filled = imputer.fit_transform(X_missing)
assert_allclose(X_filled, X, atol=0.02)
@pytest.mark.parametrize("rank", [3, 5])
def test_iterative_imputer_transform_recovery(rank):
rng = np.random.RandomState(0)
n = 70
d = 70
A = rng.rand(n, rank)
B = rng.rand(rank, d)
X_filled = np.dot(A, B)
nan_mask = rng.rand(n, d) < 0.5
X_missing = X_filled.copy()
X_missing[nan_mask] = np.nan
# split up data in half
n = n // 2
X_train = X_missing[:n]
X_test_filled = X_filled[n:]
X_test = X_missing[n:]
imputer = IterativeImputer(
max_iter=5, imputation_order="descending", verbose=1, random_state=rng
).fit(X_train)
X_test_est = imputer.transform(X_test)
assert_allclose(X_test_filled, X_test_est, atol=0.1)
def test_iterative_imputer_additive_matrix():
rng = np.random.RandomState(0)
n = 100
d = 10
A = rng.randn(n, d)
B = rng.randn(n, d)
X_filled = np.zeros(A.shape)
for i in range(d):
for j in range(d):
X_filled[:, (i + j) % d] += (A[:, i] + B[:, j]) / 2
# a quarter is randomly missing
nan_mask = rng.rand(n, d) < 0.25
X_missing = X_filled.copy()
X_missing[nan_mask] = np.nan
# split up data
n = n // 2
X_train = X_missing[:n]
X_test_filled = X_filled[n:]
X_test = X_missing[n:]
imputer = IterativeImputer(max_iter=10, verbose=1, random_state=rng).fit(X_train)
X_test_est = imputer.transform(X_test)
assert_allclose(X_test_filled, X_test_est, rtol=1e-3, atol=0.01)
def test_iterative_imputer_early_stopping():
rng = np.random.RandomState(0)
n = 50
d = 5
A = rng.rand(n, 1)
B = rng.rand(1, d)
X = np.dot(A, B)
nan_mask = rng.rand(n, d) < 0.5
X_missing = X.copy()
X_missing[nan_mask] = np.nan
imputer = IterativeImputer(
max_iter=100, tol=1e-2, sample_posterior=False, verbose=1, random_state=rng
)
X_filled_100 = imputer.fit_transform(X_missing)
assert len(imputer.imputation_sequence_) == d * imputer.n_iter_
imputer = IterativeImputer(
max_iter=imputer.n_iter_, sample_posterior=False, verbose=1, random_state=rng
)
X_filled_early = imputer.fit_transform(X_missing)
assert_allclose(X_filled_100, X_filled_early, atol=1e-7)
imputer = IterativeImputer(
max_iter=100, tol=0, sample_posterior=False, verbose=1, random_state=rng
)
imputer.fit(X_missing)
assert imputer.n_iter_ == imputer.max_iter
def test_iterative_imputer_catch_warning():
# check that we catch a RuntimeWarning due to a division by zero when a
# feature is constant in the dataset
X, y = load_diabetes(return_X_y=True)
n_samples, n_features = X.shape
# simulate that a feature only contain one category during fit
X[:, 3] = 1
# add some missing values
rng = np.random.RandomState(0)
missing_rate = 0.15
for feat in range(n_features):
sample_idx = rng.choice(
np.arange(n_samples), size=int(n_samples * missing_rate), replace=False
)
X[sample_idx, feat] = np.nan
imputer = IterativeImputer(n_nearest_features=5, sample_posterior=True)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
X_fill = imputer.fit_transform(X, y)
assert not np.any(np.isnan(X_fill))
@pytest.mark.parametrize(
"min_value, max_value, correct_output",
[
(0, 100, np.array([[0] * 3, [100] * 3])),
(None, None, np.array([[-np.inf] * 3, [np.inf] * 3])),
(-np.inf, np.inf, np.array([[-np.inf] * 3, [np.inf] * 3])),
([-5, 5, 10], [100, 200, 300], np.array([[-5, 5, 10], [100, 200, 300]])),
(
[-5, -np.inf, 10],
[100, 200, np.inf],
np.array([[-5, -np.inf, 10], [100, 200, np.inf]]),
),
],
ids=["scalars", "None-default", "inf", "lists", "lists-with-inf"],
)
def test_iterative_imputer_min_max_array_like(min_value, max_value, correct_output):
# check that passing scalar or array-like
# for min_value and max_value in IterativeImputer works
X = np.random.RandomState(0).randn(10, 3)
imputer = IterativeImputer(min_value=min_value, max_value=max_value)
imputer.fit(X)
assert isinstance(imputer._min_value, np.ndarray) and isinstance(
imputer._max_value, np.ndarray
)
assert (imputer._min_value.shape[0] == X.shape[1]) and (
imputer._max_value.shape[0] == X.shape[1]
)
assert_allclose(correct_output[0, :], imputer._min_value)
assert_allclose(correct_output[1, :], imputer._max_value)
@pytest.mark.parametrize(
"min_value, max_value, err_msg",
[
(100, 0, "min_value >= max_value."),
(np.inf, -np.inf, "min_value >= max_value."),
([-5, 5], [100, 200, 0], "_value' should be of shape"),
],
)
def test_iterative_imputer_catch_min_max_error(min_value, max_value, err_msg):
    # check that invalid scalar or array-like combinations of
    # min_value and max_value in IterativeImputer raise an informative error
X = np.random.random((10, 3))
imputer = IterativeImputer(min_value=min_value, max_value=max_value)
with pytest.raises(ValueError, match=err_msg):
imputer.fit(X)
@pytest.mark.parametrize(
"min_max_1, min_max_2",
[([None, None], [-np.inf, np.inf]), ([-10, 10], [[-10] * 4, [10] * 4])],
ids=["None-vs-inf", "Scalar-vs-vector"],
)
def test_iterative_imputer_min_max_array_like_imputation(min_max_1, min_max_2):
# Test that None/inf and scalar/vector give the same imputation
X_train = np.array(
[
[np.nan, 2, 2, 1],
[10, np.nan, np.nan, 7],
[3, 1, np.nan, 1],
[np.nan, 4, 2, np.nan],
]
)
X_test = np.array(
[[np.nan, 2, np.nan, 5], [2, 4, np.nan, np.nan], [np.nan, 1, 10, 1]]
)
imputer1 = IterativeImputer(
min_value=min_max_1[0], max_value=min_max_1[1], random_state=0
)
imputer2 = IterativeImputer(
min_value=min_max_2[0], max_value=min_max_2[1], random_state=0
)
X_test_imputed1 = imputer1.fit(X_train).transform(X_test)
X_test_imputed2 = imputer2.fit(X_train).transform(X_test)
assert_allclose(X_test_imputed1[:, 0], X_test_imputed2[:, 0])
@pytest.mark.parametrize("skip_complete", [True, False])
def test_iterative_imputer_skip_non_missing(skip_complete):
# check the imputing strategy when missing data are present in the
# testing set only.
# taken from: https://github.com/scikit-learn/scikit-learn/issues/14383
rng = np.random.RandomState(0)
X_train = np.array([[5, 2, 2, 1], [10, 1, 2, 7], [3, 1, 1, 1], [8, 4, 2, 2]])
X_test = np.array([[np.nan, 2, 4, 5], [np.nan, 4, 1, 2], [np.nan, 1, 10, 1]])
imputer = IterativeImputer(
initial_strategy="mean", skip_complete=skip_complete, random_state=rng
)
X_test_est = imputer.fit(X_train).transform(X_test)
if skip_complete:
# impute with the initial strategy: 'mean'
assert_allclose(X_test_est[:, 0], np.mean(X_train[:, 0]))
else:
assert_allclose(X_test_est[:, 0], [11, 7, 12], rtol=1e-4)
@pytest.mark.parametrize("rs_imputer", [None, 1, np.random.RandomState(seed=1)])
@pytest.mark.parametrize("rs_estimator", [None, 1, np.random.RandomState(seed=1)])
def test_iterative_imputer_dont_set_random_state(rs_imputer, rs_estimator):
class ZeroEstimator:
def __init__(self, random_state):
self.random_state = random_state
        def fit(self, *args, **kwargs):
return self
def predict(self, X):
return np.zeros(X.shape[0])
estimator = ZeroEstimator(random_state=rs_estimator)
imputer = IterativeImputer(random_state=rs_imputer)
X_train = np.zeros((10, 3))
imputer.fit(X_train)
assert estimator.random_state == rs_estimator
@pytest.mark.parametrize(
"X_fit, X_trans, params, msg_err",
[
(
np.array([[-1, 1], [1, 2]]),
np.array([[-1, 1], [1, -1]]),
{"features": "missing-only", "sparse": "auto"},
"have missing values in transform but have no missing values in fit",
),
(
np.array([["a", "b"], ["c", "a"]], dtype=str),
np.array([["a", "b"], ["c", "a"]], dtype=str),
{},
"MissingIndicator does not support data with dtype",
),
],
)
def test_missing_indicator_error(X_fit, X_trans, params, msg_err):
indicator = MissingIndicator(missing_values=-1)
indicator.set_params(**params)
with pytest.raises(ValueError, match=msg_err):
indicator.fit(X_fit).transform(X_trans)
@pytest.mark.parametrize(
"missing_values, dtype, arr_type",
[
(np.nan, np.float64, np.array),
(0, np.int32, np.array),
(-1, np.int32, np.array),
(np.nan, np.float64, sparse.csc_matrix),
(-1, np.int32, sparse.csc_matrix),
(np.nan, np.float64, sparse.csr_matrix),
(-1, np.int32, sparse.csr_matrix),
(np.nan, np.float64, sparse.coo_matrix),
(-1, np.int32, sparse.coo_matrix),
(np.nan, np.float64, sparse.lil_matrix),
(-1, np.int32, sparse.lil_matrix),
(np.nan, np.float64, sparse.bsr_matrix),
(-1, np.int32, sparse.bsr_matrix),
],
)
@pytest.mark.parametrize(
"param_features, n_features, features_indices",
[("missing-only", 3, np.array([0, 1, 2])), ("all", 3, np.array([0, 1, 2]))],
)
def test_missing_indicator_new(
missing_values, arr_type, dtype, param_features, n_features, features_indices
):
X_fit = np.array([[missing_values, missing_values, 1], [4, 2, missing_values]])
X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
X_fit_expected = np.array([[1, 1, 0], [0, 0, 1]])
X_trans_expected = np.array([[1, 1, 0], [0, 0, 0]])
# convert the input to the right array format and right dtype
X_fit = arr_type(X_fit).astype(dtype)
X_trans = arr_type(X_trans).astype(dtype)
X_fit_expected = X_fit_expected.astype(dtype)
X_trans_expected = X_trans_expected.astype(dtype)
indicator = MissingIndicator(
missing_values=missing_values, features=param_features, sparse=False
)
X_fit_mask = indicator.fit_transform(X_fit)
X_trans_mask = indicator.transform(X_trans)
assert X_fit_mask.shape[1] == n_features
assert X_trans_mask.shape[1] == n_features
assert_array_equal(indicator.features_, features_indices)
assert_allclose(X_fit_mask, X_fit_expected[:, features_indices])
assert_allclose(X_trans_mask, X_trans_expected[:, features_indices])
assert X_fit_mask.dtype == bool
assert X_trans_mask.dtype == bool
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
indicator.set_params(sparse=True)
X_fit_mask_sparse = indicator.fit_transform(X_fit)
X_trans_mask_sparse = indicator.transform(X_trans)
assert X_fit_mask_sparse.dtype == bool
assert X_trans_mask_sparse.dtype == bool
assert X_fit_mask_sparse.format == "csc"
assert X_trans_mask_sparse.format == "csc"
assert_allclose(X_fit_mask_sparse.toarray(), X_fit_mask)
assert_allclose(X_trans_mask_sparse.toarray(), X_trans_mask)
@pytest.mark.parametrize(
"arr_type",
[
sparse.csc_matrix,
sparse.csr_matrix,
sparse.coo_matrix,
sparse.lil_matrix,
sparse.bsr_matrix,
],
)
def test_missing_indicator_raise_on_sparse_with_missing_0(arr_type):
# test for sparse input and missing_value == 0
missing_values = 0
X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
# convert the input to the right array format
X_fit_sparse = arr_type(X_fit)
X_trans_sparse = arr_type(X_trans)
indicator = MissingIndicator(missing_values=missing_values)
with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
indicator.fit_transform(X_fit_sparse)
indicator.fit_transform(X_fit)
with pytest.raises(ValueError, match="Sparse input with missing_values=0"):
indicator.transform(X_trans_sparse)
@pytest.mark.parametrize("param_sparse", [True, False, "auto"])
@pytest.mark.parametrize(
"missing_values, arr_type",
[
(np.nan, np.array),
(0, np.array),
(np.nan, sparse.csc_matrix),
(np.nan, sparse.csr_matrix),
(np.nan, sparse.coo_matrix),
(np.nan, sparse.lil_matrix),
],
)
def test_missing_indicator_sparse_param(arr_type, missing_values, param_sparse):
# check the format of the output with different sparse parameter
X_fit = np.array([[missing_values, missing_values, 1], [4, missing_values, 2]])
X_trans = np.array([[missing_values, missing_values, 1], [4, 12, 10]])
X_fit = arr_type(X_fit).astype(np.float64)
X_trans = arr_type(X_trans).astype(np.float64)
indicator = MissingIndicator(missing_values=missing_values, sparse=param_sparse)
X_fit_mask = indicator.fit_transform(X_fit)
X_trans_mask = indicator.transform(X_trans)
if param_sparse is True:
assert X_fit_mask.format == "csc"
assert X_trans_mask.format == "csc"
elif param_sparse == "auto" and missing_values == 0:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
elif param_sparse is False:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
else:
if sparse.issparse(X_fit):
assert X_fit_mask.format == "csc"
assert X_trans_mask.format == "csc"
else:
assert isinstance(X_fit_mask, np.ndarray)
assert isinstance(X_trans_mask, np.ndarray)
def test_missing_indicator_string():
X = np.array([["a", "b", "c"], ["b", "c", "a"]], dtype=object)
indicator = MissingIndicator(missing_values="a", features="all")
X_trans = indicator.fit_transform(X)
assert_array_equal(X_trans, np.array([[True, False, False], [False, False, True]]))
@pytest.mark.parametrize(
"X, missing_values, X_trans_exp",
[
(
np.array([["a", "b"], ["b", "a"]], dtype=object),
"a",
np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
),
(
np.array([[np.nan, 1.0], [1.0, np.nan]]),
np.nan,
np.array([[1.0, 1.0, True, False], [1.0, 1.0, False, True]]),
),
(
np.array([[np.nan, "b"], ["b", np.nan]], dtype=object),
np.nan,
np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
),
(
np.array([[None, "b"], ["b", None]], dtype=object),
None,
np.array([["b", "b", True, False], ["b", "b", False, True]], dtype=object),
),
],
)
def test_missing_indicator_with_imputer(X, missing_values, X_trans_exp):
trans = make_union(
SimpleImputer(missing_values=missing_values, strategy="most_frequent"),
MissingIndicator(missing_values=missing_values),
)
X_trans = trans.fit_transform(X)
assert_array_equal(X_trans, X_trans_exp)
@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
@pytest.mark.parametrize(
"imputer_missing_values, missing_value, err_msg",
[
("NaN", np.nan, "Input X contains NaN"),
("-1", -1, "types are expected to be both numerical."),
],
)
def test_inconsistent_dtype_X_missing_values(
imputer_constructor, imputer_missing_values, missing_value, err_msg
):
# regression test for issue #11390. Comparison between incoherent dtype
# for X and missing_values was not raising a proper error.
rng = np.random.RandomState(42)
X = rng.randn(10, 10)
X[0, 0] = missing_value
imputer = imputer_constructor(missing_values=imputer_missing_values)
with pytest.raises(ValueError, match=err_msg):
imputer.fit_transform(X)
def test_missing_indicator_no_missing():
# check that all features are dropped if there are no missing values when
# features='missing-only' (#13491)
X = np.array([[1, 1], [1, 1]])
mi = MissingIndicator(features="missing-only", missing_values=-1)
Xt = mi.fit_transform(X)
assert Xt.shape[1] == 0
def test_missing_indicator_sparse_no_explicit_zeros():
# Check that non missing values don't become explicit zeros in the mask
# generated by missing indicator when X is sparse. (#13491)
X = sparse.csr_matrix([[0, 1, 2], [1, 2, 0], [2, 0, 1]])
mi = MissingIndicator(features="all", missing_values=1)
Xt = mi.fit_transform(X)
assert Xt.getnnz() == Xt.sum()
@pytest.mark.parametrize("imputer_constructor", [SimpleImputer, IterativeImputer])
def test_imputer_without_indicator(imputer_constructor):
X = np.array([[1, 1], [1, 1]])
imputer = imputer_constructor()
imputer.fit(X)
assert imputer.indicator_ is None
@pytest.mark.parametrize(
"arr_type",
[
sparse.csc_matrix,
sparse.csr_matrix,
sparse.coo_matrix,
sparse.lil_matrix,
sparse.bsr_matrix,
],
)
def test_simple_imputation_add_indicator_sparse_matrix(arr_type):
X_sparse = arr_type([[np.nan, 1, 5], [2, np.nan, 1], [6, 3, np.nan], [1, 2, 9]])
X_true = np.array(
[
[3.0, 1.0, 5.0, 1.0, 0.0, 0.0],
[2.0, 2.0, 1.0, 0.0, 1.0, 0.0],
[6.0, 3.0, 5.0, 0.0, 0.0, 1.0],
[1.0, 2.0, 9.0, 0.0, 0.0, 0.0],
]
)
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True)
X_trans = imputer.fit_transform(X_sparse)
assert sparse.issparse(X_trans)
assert X_trans.shape == X_true.shape
assert_allclose(X_trans.toarray(), X_true)
@pytest.mark.parametrize(
"strategy, expected", [("most_frequent", "b"), ("constant", "missing_value")]
)
def test_simple_imputation_string_list(strategy, expected):
X = [["a", "b"], ["c", np.nan]]
X_true = np.array([["a", "b"], ["c", expected]], dtype=object)
imputer = SimpleImputer(strategy=strategy)
X_trans = imputer.fit_transform(X)
assert_array_equal(X_trans, X_true)
@pytest.mark.parametrize(
"order, idx_order",
[("ascending", [3, 4, 2, 0, 1]), ("descending", [1, 0, 2, 4, 3])],
)
def test_imputation_order(order, idx_order):
# regression test for #15393
rng = np.random.RandomState(42)
X = rng.rand(100, 5)
X[:50, 1] = np.nan
X[:30, 0] = np.nan
X[:20, 2] = np.nan
X[:10, 4] = np.nan
with pytest.warns(ConvergenceWarning):
trs = IterativeImputer(max_iter=1, imputation_order=order, random_state=0).fit(
X
)
idx = [x.feat_idx for x in trs.imputation_sequence_]
assert idx == idx_order
@pytest.mark.parametrize("missing_value", [-1, np.nan])
def test_simple_imputation_inverse_transform(missing_value):
# Test inverse_transform feature for np.nan
X_1 = np.array(
[
[9, missing_value, 3, -1],
[4, -1, 5, 4],
[6, 7, missing_value, -1],
[8, 9, 0, missing_value],
]
)
X_2 = np.array(
[
[5, 4, 2, 1],
[2, 1, missing_value, 3],
[9, missing_value, 7, 1],
[6, 4, 2, missing_value],
]
)
X_3 = np.array(
[
[1, missing_value, 5, 9],
[missing_value, 4, missing_value, missing_value],
[2, missing_value, 7, missing_value],
[missing_value, 3, missing_value, 8],
]
)
X_4 = np.array(
[
[1, 1, 1, 3],
[missing_value, 2, missing_value, 1],
[2, 3, 3, 4],
[missing_value, 4, missing_value, 2],
]
)
imputer = SimpleImputer(
missing_values=missing_value, strategy="mean", add_indicator=True
)
X_1_trans = imputer.fit_transform(X_1)
X_1_inv_trans = imputer.inverse_transform(X_1_trans)
X_2_trans = imputer.transform(X_2) # test on new data
X_2_inv_trans = imputer.inverse_transform(X_2_trans)
assert_array_equal(X_1_inv_trans, X_1)
assert_array_equal(X_2_inv_trans, X_2)
for X in [X_3, X_4]:
X_trans = imputer.fit_transform(X)
X_inv_trans = imputer.inverse_transform(X_trans)
assert_array_equal(X_inv_trans, X)
@pytest.mark.parametrize("missing_value", [-1, np.nan])
def test_simple_imputation_inverse_transform_exceptions(missing_value):
X_1 = np.array(
[
[9, missing_value, 3, -1],
[4, -1, 5, 4],
[6, 7, missing_value, -1],
[8, 9, 0, missing_value],
]
)
imputer = SimpleImputer(missing_values=missing_value, strategy="mean")
X_1_trans = imputer.fit_transform(X_1)
with pytest.raises(
ValueError, match=f"Got 'add_indicator={imputer.add_indicator}'"
):
imputer.inverse_transform(X_1_trans)
@pytest.mark.parametrize(
"expected,array,dtype,extra_value,n_repeat",
[
# array of object dtype
("extra_value", ["a", "b", "c"], object, "extra_value", 2),
(
"most_frequent_value",
["most_frequent_value", "most_frequent_value", "value"],
object,
"extra_value",
1,
),
("a", ["min_value", "min_valuevalue"], object, "a", 2),
("min_value", ["min_value", "min_value", "value"], object, "z", 2),
# array of numeric dtype
(10, [1, 2, 3], int, 10, 2),
(1, [1, 1, 2], int, 10, 1),
(10, [20, 20, 1], int, 10, 2),
(1, [1, 1, 20], int, 10, 2),
],
)
def test_most_frequent(expected, array, dtype, extra_value, n_repeat):
assert expected == _most_frequent(
np.array(array, dtype=dtype), extra_value, n_repeat
)
def test_simple_impute_pd_na():
pd = pytest.importorskip("pandas")
# Impute pandas array of string types.
df = pd.DataFrame({"feature": pd.Series(["abc", None, "de"], dtype="string")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value="na")
_assert_array_equal_and_same_dtype(
imputer.fit_transform(df), np.array([["abc"], ["na"], ["de"]], dtype=object)
)
# Impute pandas array of string types without any missing values.
df = pd.DataFrame({"feature": pd.Series(["abc", "de", "fgh"], dtype="string")})
imputer = SimpleImputer(fill_value="ok", strategy="constant")
_assert_array_equal_and_same_dtype(
imputer.fit_transform(df), np.array([["abc"], ["de"], ["fgh"]], dtype=object)
)
# Impute pandas array of integer types.
df = pd.DataFrame({"feature": pd.Series([1, None, 3], dtype="Int64")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-1)
_assert_allclose_and_same_dtype(
imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
)
    # Using `np.nan` also works.
imputer = SimpleImputer(missing_values=np.nan, strategy="constant", fill_value=-1)
_assert_allclose_and_same_dtype(
imputer.fit_transform(df), np.array([[1], [-1], [3]], dtype="float64")
)
# Impute pandas array of integer types with 'median' strategy.
df = pd.DataFrame({"feature": pd.Series([1, None, 2, 3], dtype="Int64")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
_assert_allclose_and_same_dtype(
imputer.fit_transform(df), np.array([[1], [2], [2], [3]], dtype="float64")
)
# Impute pandas array of integer types with 'mean' strategy.
df = pd.DataFrame({"feature": pd.Series([1, None, 2], dtype="Int64")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="mean")
_assert_allclose_and_same_dtype(
imputer.fit_transform(df), np.array([[1], [1.5], [2]], dtype="float64")
)
# Impute pandas array of float types.
df = pd.DataFrame({"feature": pd.Series([1.0, None, 3.0], dtype="float64")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="constant", fill_value=-2.0)
_assert_allclose_and_same_dtype(
imputer.fit_transform(df), np.array([[1.0], [-2.0], [3.0]], dtype="float64")
)
# Impute pandas array of float types with 'median' strategy.
df = pd.DataFrame({"feature": pd.Series([1.0, None, 2.0, 3.0], dtype="float64")})
imputer = SimpleImputer(missing_values=pd.NA, strategy="median")
_assert_allclose_and_same_dtype(
imputer.fit_transform(df),
np.array([[1.0], [2.0], [2.0], [3.0]], dtype="float64"),
)
def test_missing_indicator_feature_names_out():
"""Check that missing indicator return the feature names with a prefix."""
pd = pytest.importorskip("pandas")
missing_values = np.nan
X = pd.DataFrame(
[
[missing_values, missing_values, 1, missing_values],
[4, missing_values, 2, 10],
],
columns=["a", "b", "c", "d"],
)
indicator = MissingIndicator(missing_values=missing_values).fit(X)
feature_names = indicator.get_feature_names_out()
expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"]
assert_array_equal(expected_names, feature_names)
def test_imputer_lists_fit_transform():
"""Check transform uses object dtype when fitted on an object dtype.
Non-regression test for #19572.
"""
X = [["a", "b"], ["c", "b"], ["a", "a"]]
imp_frequent = SimpleImputer(strategy="most_frequent").fit(X)
X_trans = imp_frequent.transform([[np.nan, np.nan]])
assert X_trans.dtype == object
assert_array_equal(X_trans, [["a", "b"]])
@pytest.mark.parametrize("dtype_test", [np.float32, np.float64])
def test_imputer_transform_preserves_numeric_dtype(dtype_test):
"""Check transform preserves numeric dtype independent of fit dtype."""
X = np.asarray(
[[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64
)
imp = SimpleImputer().fit(X)
X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test)
X_trans = imp.transform(X_test)
assert X_trans.dtype == dtype_test
| bsd-3-clause |
espg/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 12 | 23250 | import re
from collections import defaultdict
from functools import partial
import numpy as np
import pytest
import scipy.sparse as sp
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
weights = [0.1, 0.25]
X, y = make_classification(
n_samples=100,
n_features=20,
n_informative=5,
n_redundant=1,
n_repeated=1,
n_classes=3,
n_clusters_per_class=1,
hypercube=False,
shift=None,
scale=None,
weights=weights,
random_state=0,
)
assert weights == [0.1, 0.25]
assert X.shape == (100, 20), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert np.unique(y).shape == (3,), "Unexpected number of classes"
assert sum(y == 0) == 10, "Unexpected number of samples in class #0"
assert sum(y == 1) == 25, "Unexpected number of samples in class #1"
assert sum(y == 2) == 65, "Unexpected number of samples in class #2"
# Test for n_features > 30
X, y = make_classification(
n_samples=2000,
n_features=31,
n_informative=31,
n_redundant=0,
n_repeated=0,
hypercube=True,
scale=0.5,
random_state=0,
)
assert X.shape == (2000, 31), "X shape mismatch"
assert y.shape == (2000,), "y shape mismatch"
assert (
np.unique(X.view([("", X.dtype)] * X.shape[1]))
.view(X.dtype)
.reshape(-1, X.shape[1])
.shape[0]
== 2000
), "Unexpected number of unique rows"
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(
make_classification,
class_sep=class_sep,
n_redundant=0,
n_repeated=0,
flip_y=0,
shift=0,
scale=1,
shuffle=False,
)
for n_informative, weights, n_clusters_per_class in [
(2, [1], 1),
(2, [1 / 3] * 3, 1),
(2, [1 / 4] * 4, 1),
(2, [1 / 2] * 2, 2),
(2, [3 / 4, 1 / 4], 2),
(10, [1 / 3] * 3, 10),
(int(64), [1], 1),
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(
n_samples=n_samples,
n_classes=n_classes,
weights=weights,
n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube,
random_state=0,
)
assert X.shape == (n_samples, n_informative)
assert y.shape == (n_samples,)
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype="|S{0}".format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs, return_inverse=True)
assert (
len(unique_signs) == n_clusters
), "Wrong number of clusters, or not in distinct quadrants"
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert (
len(clusters) == n_clusters_per_class
), "Wrong number of clusters per class"
assert len(clusters_by_class) == n_classes, "Wrong number of classes"
assert_array_almost_equal(
np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples per class",
)
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(
np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg="Clusters are not centered on hypercube vertices",
)
else:
with pytest.raises(AssertionError):
assert_array_almost_equal(
np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg=(
"Clusters should not be centered on hypercube vertices"
),
)
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1)
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2)
@pytest.mark.parametrize(
"weights, err_type, err_msg",
[
([], ValueError, "Weights specified but incompatible with number of classes."),
(
[0.25, 0.75, 0.1],
ValueError,
"Weights specified but incompatible with number of classes.",
),
(
np.array([]),
ValueError,
"Weights specified but incompatible with number of classes.",
),
(
np.array([0.25, 0.75, 0.1]),
ValueError,
"Weights specified but incompatible with number of classes.",
),
(
np.random.random(3),
ValueError,
"Weights specified but incompatible with number of classes.",
),
],
)
def test_make_classification_weights_type(weights, err_type, err_msg):
with pytest.raises(err_type, match=err_msg):
make_classification(weights=weights)
@pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}])
def test_make_classification_weights_array_or_list_ok(kwargs):
X1, y1 = make_classification(weights=[0.1, 0.9], random_state=0, **kwargs)
X2, y2 = make_classification(weights=np.array([0.1, 0.9]), random_state=0, **kwargs)
assert_almost_equal(X1, X2)
assert_almost_equal(y1, y2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(
n_samples=100,
n_features=20,
n_classes=3,
random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled,
)
assert X.shape == (100, 20), "X shape mismatch"
if not allow_unlabeled:
assert max([max(y) for y in Y]) == 2
assert min([len(y) for y in Y]) == min_length
assert max([len(y) for y in Y]) <= 3
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(
n_samples=25,
n_features=20,
n_classes=3,
random_state=0,
allow_unlabeled=allow_unlabeled,
)
assert X.shape == (25, 20), "X shape mismatch"
assert Y.shape == (25, 3), "Y shape mismatch"
assert np.all(np.sum(Y, axis=0) > min_length)
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25,
n_features=20,
n_classes=3,
random_state=0,
allow_unlabeled=allow_unlabeled,
return_distributions=True,
)
assert_array_almost_equal(X, X2)
assert_array_equal(Y, Y2)
assert p_c.shape == (3,)
assert_almost_equal(p_c.sum(), 1)
assert p_w_c.shape == (20, 3)
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(
n_samples=25,
n_features=20,
n_classes=3,
random_state=0,
return_indicator="sparse",
allow_unlabeled=allow_unlabeled,
)
assert X.shape == (25, 20), "X shape mismatch"
assert Y.shape == (25, 3), "Y shape mismatch"
assert sp.issparse(Y)
@pytest.mark.parametrize(
"params, err_msg",
[
({"n_classes": 0}, "'n_classes' should be an integer"),
({"length": 0}, "'length' should be an integer"),
],
)
def test_make_multilabel_classification_valid_arguments(params, err_msg):
with pytest.raises(ValueError, match=err_msg):
make_multilabel_classification(**params)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert np.unique(y).shape == (2,), "Unexpected number of classes"
def test_make_regression():
X, y, c = make_regression(
n_samples=100,
n_features=10,
n_informative=3,
effective_rank=5,
coef=True,
bias=0.0,
noise=1.0,
random_state=0,
)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert c.shape == (10,), "coef shape mismatch"
assert sum(c != 0.0) == 3, "Unexpected number of informative features"
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert X.shape == (100, 1)
def test_make_regression_multitarget():
X, y, c = make_regression(
n_samples=100,
n_features=10,
n_informative=3,
n_targets=3,
coef=True,
noise=1.0,
random_state=0,
)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100, 3), "y shape mismatch"
assert c.shape == (10, 3), "coef shape mismatch"
assert_array_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(
random_state=0,
n_samples=50,
n_features=2,
centers=cluster_centers,
cluster_std=cluster_stds,
)
assert X.shape == (50, 2), "X shape mismatch"
assert y.shape == (50,), "y shape mismatch"
assert np.unique(y).shape == (3,), "Unexpected number of blobs"
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_blobs_n_samples_list():
n_samples = [50, 30, 20]
X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(
np.bincount(y, minlength=len(n_samples)) == n_samples
), "Incorrect number of samples per blob"
def test_make_blobs_n_samples_list_with_centers():
n_samples = [20, 20, 20]
centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cluster_stds = np.array([0.05, 0.2, 0.4])
X, y = make_blobs(
n_samples=n_samples, centers=centers, cluster_std=cluster_stds, random_state=0
)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(
np.bincount(y, minlength=len(n_samples)) == n_samples
), "Incorrect number of samples per blob"
for i, (ctr, std) in enumerate(zip(centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
@pytest.mark.parametrize(
"n_samples", [[5, 3, 0], np.array([5, 3, 0]), tuple([5, 3, 0])]
)
def test_make_blobs_n_samples_centers_none(n_samples):
centers = None
X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=0)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(
np.bincount(y, minlength=len(n_samples)) == n_samples
), "Incorrect number of samples per blob"
def test_make_blobs_return_centers():
n_samples = [10, 20]
n_features = 3
X, y, centers = make_blobs(
n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0
)
assert centers.shape == (len(n_samples), n_features)
def test_make_blobs_error():
n_samples = [20, 20, 20]
centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cluster_stds = np.array([0.05, 0.2, 0.4])
wrong_centers_msg = re.escape(
"Length of `n_samples` not consistent with number of centers. "
f"Got n_samples = {n_samples} and centers = {centers[:-1]}"
)
with pytest.raises(ValueError, match=wrong_centers_msg):
make_blobs(n_samples, centers=centers[:-1])
wrong_std_msg = re.escape(
"Length of `clusters_std` not consistent with number of centers. "
f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}"
)
with pytest.raises(ValueError, match=wrong_std_msg):
make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1])
wrong_type_msg = "Parameter `centers` must be array-like. Got {!r} instead".format(
3
)
with pytest.raises(ValueError, match=wrong_type_msg):
make_blobs(n_samples, centers=3)
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0, random_state=0)
assert X.shape == (5, 10), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(
y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3]
+ 5 * X[:, 4],
)
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 4), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(
y, (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5
)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 4), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(
y, np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0])
)
def test_make_low_rank_matrix():
X = make_low_rank_matrix(
n_samples=50,
n_features=25,
effective_rank=5,
tail_strength=0.01,
random_state=0,
)
assert X.shape == (50, 25), "X shape mismatch"
from numpy.linalg import svd
u, s, v = svd(X)
assert sum(s) - 5 < 0.1, "X rank is not approximately 5"
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(
n_samples=5,
n_components=8,
n_features=10,
n_nonzero_coefs=3,
random_state=0,
data_transposed=False,
)
assert Y.shape == (5, 10), "Y shape mismatch"
assert D.shape == (8, 10), "D shape mismatch"
assert X.shape == (5, 8), "X shape mismatch"
for row in X:
assert len(np.flatnonzero(row)) == 3, "Non-zero coefs mismatch"
assert_allclose(Y, X @ D)
assert_allclose(np.sqrt((D**2).sum(axis=1)), np.ones(D.shape[0]))
def test_make_sparse_coded_signal_transposed():
Y, D, X = make_sparse_coded_signal(
n_samples=5,
n_components=8,
n_features=10,
n_nonzero_coefs=3,
random_state=0,
data_transposed=True,
)
assert Y.shape == (10, 5), "Y shape mismatch"
assert D.shape == (10, 8), "D shape mismatch"
assert X.shape == (8, 5), "X shape mismatch"
for col in X.T:
assert len(np.flatnonzero(col)) == 3, "Non-zero coefs mismatch"
assert_allclose(Y, D @ X)
assert_allclose(np.sqrt((D**2).sum(axis=0)), np.ones(D.shape[1]))
# TODO(1.3): remove
def test_make_sparse_code_signal_warning():
"""Check the message for future deprecation."""
warn_msg = "The default value of data_transposed will change from True to False"
with pytest.warns(FutureWarning, match=warn_msg):
make_sparse_coded_signal(
n_samples=1, n_components=1, n_features=1, n_nonzero_coefs=1, random_state=0
)
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert X.shape == (5, 10), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert X.shape == (5, 5), "X shape mismatch"
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(
eigenvalues > 0, np.array([True] * 5), "X is not positive-definite"
)
@pytest.mark.parametrize("hole", [False, True])
def test_make_swiss_roll(hole):
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0, hole=hole)
assert X.shape == (5, 3)
assert t.shape == (5,)
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 3), "X shape mismatch"
assert t.shape == (5,), "t shape mismatch"
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0
)
assert X.shape == (100, 100), "X shape mismatch"
assert rows.shape == (4, 100), "rows shape mismatch"
assert cols.shape == (
4,
100,
), "columns shape mismatch"
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0
)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0
)
assert X.shape == (100, 100), "X shape mismatch"
assert rows.shape == (100, 100), "rows shape mismatch"
assert cols.shape == (
100,
100,
), "columns shape mismatch"
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
)
X2, _, _ = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
)
assert_array_almost_equal(X1, X2)
def test_make_moons():
X, y = make_moons(3, shuffle=False)
for x, label in zip(X, y):
center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
dist_sqr = ((x - center) ** 2).sum()
assert_almost_equal(
dist_sqr, 1.0, err_msg="Point is not on expected unit circle"
)
def test_make_moons_unbalanced():
X, y = make_moons(n_samples=(7, 5))
assert (
np.sum(y == 0) == 7 and np.sum(y == 1) == 5
), "Number of samples in a moon is wrong"
assert X.shape == (12, 2), "X shape mismatch"
assert y.shape == (12,), "y shape mismatch"
with pytest.raises(
ValueError,
match=r"`n_samples` can be either an int " r"or a two-element tuple.",
):
make_moons(n_samples=[1, 2, 3])
with pytest.raises(
ValueError,
match=r"`n_samples` can be either an int " r"or a two-element tuple.",
):
make_moons(n_samples=(10,))
def test_make_circles():
factor = 0.3
for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]:
# Testing odd and even case, because in the past make_circles always
# created an even number of samples.
X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor)
assert X.shape == (n_samples, 2), "X shape mismatch"
assert y.shape == (n_samples,), "y shape mismatch"
center = [0.0, 0.0]
for x, label in zip(X, y):
dist_sqr = ((x - center) ** 2).sum()
            dist_exp = 1.0 if label == 0 else factor**2
assert_almost_equal(
dist_sqr, dist_exp, err_msg="Point is not on expected circle"
)
assert X[y == 0].shape == (
n_outer,
2,
), "Samples not correctly distributed across circles."
assert X[y == 1].shape == (
n_inner,
2,
), "Samples not correctly distributed across circles."
with pytest.raises(ValueError):
make_circles(factor=-0.01)
with pytest.raises(ValueError):
make_circles(factor=1.0)
def test_make_circles_unbalanced():
X, y = make_circles(n_samples=(2, 8))
    assert np.sum(y == 0) == 2, "Number of samples in outer circle is wrong"
    assert np.sum(y == 1) == 8, "Number of samples in inner circle is wrong"
assert X.shape == (10, 2), "X shape mismatch"
assert y.shape == (10,), "y shape mismatch"
with pytest.raises(
ValueError,
match=r"`n_samples` can be either an int " r"or a two-element tuple.",
):
make_circles(n_samples=[1, 2, 3])
with pytest.raises(
ValueError,
match=r"`n_samples` can be either an int " r"or a two-element tuple.",
):
make_circles(n_samples=(10,))
| bsd-3-clause |
fboers/jumegX | decompose/icasso.py | 1 | 70425 | # Authors: Lukas Breuer <l.breuer@fz-juelich.de>
"""
----------------------------------------------------------------------
--- jumeg.decompose.icasso --------------------------------------------
----------------------------------------------------------------------
author : Lukas Breuer
email : l.breuer@fz-juelich.de
last update: 09.11.2016
version : 1.2
----------------------------------------------------------------------
This simple implementation of ICASSO is based on the following
publication:
----------------------------------------------------------------------
J. Himberg, A. Hyvaerinen, and F. Esposito. 'Validating the
independent components of neuroimaging time-series via
clustering and visualization', Neuroimage, 22:3(1214-1222), 2004.
Should you use this code, we kindly request you to cite the
aforementioned publication.
<http://research.ics.aalto.fi/ica/icasso/about+download.shtml
DOWNLOAD ICASSO from here>
----------------------------------------------------------------------
Overview
----------------------------------------------------------------------
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly different conditions and
clustering the obtained components. Note, here FourierICA
is applied
1. Runs ICA with given parameters M times on data X.
2. Clusters the estimates and computes other statistics.
3. Returns (and visualizes) the best estimates.
----------------------------------------------------------------------
How to use ICASSO?
----------------------------------------------------------------------
from jumeg.decompose import icasso
    icasso_obj = JuMEG_icasso()
W, A, quality, fourier_ica_obj = icasso_obj.fit(fn_raw, stim_name='STI 013',
event_id=1, tmin_stim=-0.5,
tmax_stim=0.5, flow=4.0, fhigh=34.0)
--> for further comments we refer directly to the functions or to
fourier_ica_test.py
----------------------------------------------------------------------
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
########################################################
# #
# JuMEG_icasso class #
# #
########################################################
class JuMEG_icasso(object):
def __init__(self, ica_method='fourierica', average=False, nrep=50,
fn_inv=None, src_loc_method='dSPM', snr=1.0,
morph2fsaverage=True, stim_name=None, event_id=1,
flow=4.0, fhigh=34.0, tmin_win=0.0, tmax_win=1.0,
pca_dim=None, dim_reduction='MDL', conv_eps=1e-9,
max_iter=2000, tICA=False, lrate=1.0, cost_function=None,
decim_epochs=False):
"""
Generate ICASSO object.
Parameters
----------
ica_method: string which ICA method should be used
default: ica_method='FourierICA'
        average: should ICA be performed on data averaged across
subjects?
default: average=False
nrep: number of repetitions ICA should be performed
default: nrep=50
fn_inv: file name of inverse operator. If given
FourierICA is applied on data transformed to
source space
src_loc_method: method used for source localization.
Only of interest if 'fn_inv' is set
default: src_loc_method='dSPM'
snr: signal-to-noise ratio for performing source
localization
default: snr=1.0
morph2fsaverage: should data be morphed to the
'fsaverage' brain?
default: morph2fsaverage=True
stim_name: string which contains the name of the
stimulus channel. Only necessary if ICA should
be applied to evoked data.
event_id: integer of list of integer containing the
event IDs which should be used to generate epochs
default: event_id=1
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
            Note: here default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
tmin_win: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default=0.0
tmax_win: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default=1.0
dim_reduction: {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
pca_dim: Integer. The number of components used for PCA
decomposition.
conv_eps: iteration stops when weight changes are smaller
            than this number
default: conv_eps = 1e-9
max_iter: integer containing the maximal number of
iterations to be performed in ICA estimation
default: max_iter=2000
        tICA: bool whether temporal ICA should be applied
            (instead of FourierICA)
default: tICA=False
        lrate: float containing the learning rate which should be
used in the applied ICA algorithm
default: lrate=1.0
        cost_function: string containing the cost function to
            use in the applied ICA algorithm. For further information
            look in fourier_ica.py
            default: cost_function=None
        decim_epochs: integer. If set, the number of epochs used
to estimate the optimal demixing matrix is decimated
to the given number.
default: decim_epochs=False
Returns
-------
object: ICASSO object
"""
self._ica_method = ica_method
self.average = average
self._nrep = nrep
self.fn_inv = fn_inv
self.src_loc_method = src_loc_method
self.snr = snr
self.morph2fsaverage = morph2fsaverage
self.whitenMat = [] # whitening matrix
self.dewhitenMat = [] # de-whitening matrix
self.W_est = [] # de-mixing matrix
self.A_est = [] # mixing matrix
self.dmean = [] # data mean
self.dstd = [] # data standard-deviation
self.stim_name = stim_name
self.event_id = event_id
self.flow = flow
self.fhigh = fhigh
self._sfreq = 0.0
self.tmin_win = tmin_win
self.tmax_win = tmax_win
# ICA parameter
self.conv_eps = conv_eps # stopping threshold
self.max_iter = max_iter
self.lrate = lrate # learning rate for the ICA algorithm
self.tICA = tICA # should temporal ICA be performed?
self.pca_dim = pca_dim
self.dim_reduction= dim_reduction
self.cost_function = cost_function
self.decim_epochs = decim_epochs
        # make sure to choose meaningful parameters
        # when FourierICA is not used
if self.ica_method != 'fourierica':
if conv_eps == 1e-9:
self.conv_eps = 1e-12 # stopping threshold
if max_iter == 2000:
self.max_iter = 200
if lrate == 1:
self.lrate = None # learning rate for the ICA algorithm
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get maximum number of repetitions
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_nrep(self, nrep):
self._nrep = nrep
def _get_nrep(self):
return int(self._nrep)
nrep = property(_get_nrep, _set_nrep)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get ICA method
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_ica_method(self, ica_method):
possible_methods = ['extended-infomax', 'fastica',
'fourierica', 'infomax']
if ica_method in possible_methods:
self._ica_method = ica_method
else:
print 'WARNING: chosen ICA method does not exist!'
print 'Must be one of the following methods: ', possible_methods
print 'But your choice was: ', ica_method
            print 'Program stops!'
import pdb
pdb.set_trace()
def _get_ica_method(self):
return self._ica_method
ica_method = property(_get_ica_method, _set_ica_method)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate linkage between components
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
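    # The merging rule below amounts to agglomerative clustering with
    # average linkage on the dissimilarity matrix 'dis'; conceptually
    # (a sketch only, SciPy is not used here) the same dendrogram could
    # be obtained with
    #
    #     from scipy.cluster.hierarchy import linkage
    #     from scipy.spatial.distance import squareform
    #     Z = linkage(squareform(dis, checks=False), method='average')
    #
    # The hand-rolled version is kept to reproduce the ordering and the
    # MATLAB-LINKAGE-style output expected by the original ICASSO code.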
def _linkage(self, dis):
# initialize some variables
dlen, dim = dis.shape
Md = dis.copy()
Md += np.diag(np.ones(dlen)*np.inf)
# ------------------------------------------
# estimate clusters
# ------------------------------------------
# --> each vector is at first in its own cluster
Z = np.zeros((dlen-1, 3)) + np.NaN
clusters = np.arange(dlen)
Cdist = Md.copy()
for idx in np.arange(dlen-1):
d_min = np.min(Cdist)
if np.isinf(d_min):
break # no more connected clusters
else:
min_idx = np.argmin(np.min(Cdist, axis=0))
c1 = np.argmin(Cdist[:, min_idx]) # cluster1
c2 = clusters[min_idx] # cluster2
# combine the two clusters
c1_inds = (clusters == c1).nonzero()[0] # vectors belonging to c1
c2_inds = (clusters == c2).nonzero()[0] # vectors belonging to c2
c_inds = np.concatenate((c1_inds, c2_inds)) # members of the new cluster
nc_inds = len(c_inds)
# find bigger cluster
if len(c2_inds) > len(c1_inds):
c, k = c2, c1
else:
c, k = c1, c2
clusters[c_inds] = c # update cluster info
Z[idx, :] = [c, k, d_min] # save info into Z
# ------------------------------------------
# update cluster distances
# ------------------------------------------
# remove the subclusters from the cdist table
for idxC in c_inds:
Cdist[idxC, c_inds] = np.Inf # distance of clusters to its members = Inf
k_inds = c_inds[c_inds != c] # vector of the smallest cluster
Cdist[k_inds, :] = np.Inf # set distance of the subcluster to
Cdist[:, k_inds] = np.Inf # other clusters = Inf
# update the distance of this cluster to the other clusters
idxC = (clusters != c).nonzero()[0]
if len(idxC) > 0:
cl = np.unique(clusters[idxC])
for l in cl:
o_inds = (clusters == l).nonzero()[0] # indices belonging to cluster k
no_inds = len(o_inds)
vd = np.zeros((nc_inds, no_inds))
for ivd in range(nc_inds):
vd[ivd, :] = Md[c_inds[ivd], o_inds]
vd = vd.flatten()
idxvd = np.isfinite(vd).nonzero()[0]
nidxvd = len(idxvd)
sd = np.Inf if nidxvd == 0 else np.sum(vd[idxvd])/nidxvd
Cdist[c, l] = sd
Cdist[l, c] = sd
last = Z[idx, 0]
if np.isnan(last):
last = Z[idx-1, 0]
rest = np.setdiff1d(np.unique(clusters), last)
Z[idx:dlen-2, 0] = rest.transpose()
Z[idx:dlen-2, 1] = last
Z[idx:dlen-2, 2] = np.Inf
idx -= 1
else:
rest = []
# ------------------------------------------
# return values
# ------------------------------------------
# calculate the order of the samples
order = np.array([last])
# go through the combination matrix from top to down
for k in range(idx, -1, -1):
c_var = Z[k, 0]
k_var = np.array([Z[k, 1]])
idx_var = np.where(order == c_var)[0]
if len(idx_var) == 0:
order = np.concatenate((k_var, order))
else:
order = np.concatenate((order[:idx_var[0]], k_var, order[idx_var[0]:]))
order = np.concatenate((rest, order))[::-1]
        # to maintain compatibility with the MATLAB Statistics Toolbox,
        # the values in Z must still be transformed so that they match
        # the output of the LINKAGE function
Zs = Z.copy()
current_cluster = np.array(range(dlen))
iter_stop = len(Z[:, 0])
for idx in range(iter_stop):
Zs[idx, 0] = current_cluster[int(Z[idx, 0])]
Zs[idx, 1] = current_cluster[int(Z[idx, 1])]
current_cluster[int(Z[idx, 0])] = dlen + idx
current_cluster[int(Z[idx, 1])] = dlen + idx
return Zs, order
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
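    # In effect, each pair of stacked, de-whitened demixing vectors
    # w_i, w_j (rows of the matrix built below) is compared by the
    # absolute cosine similarity
    #
    #     sigma_ij = |<w_i, w_j>| / (||w_i|| * ||w_j||)
    #
    # rows are first normalized to unit length, then |W W^H| is taken
    # and clipped to the interval [0, 1].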
def _corrw(self):
# get some dimension information
npc = int(self.W_est[0].shape[0])
nchan = int(self.W_est[0].shape[1])
ntimes = int(len(self.W_est))
# save estimated demixing matrices W in one matrix
weight = np.zeros((ntimes*npc, nchan), dtype=np.complex)
for idx in range(ntimes):
weight[(idx*npc):((idx+1)*npc), :] = self.W_est[idx]
weight = np.dot(weight, self.dewhitenMat)
# normalize rows to unit length
weight_norm = np.abs(np.sqrt(np.sum(weight*weight.conj(), axis=1))).reshape((npc*ntimes, 1))
weight /= np.repeat(weight_norm, npc, axis=1)
# compute similarities
similarities = np.abs(np.dot(weight, weight.conj().transpose()))
similarities[similarities > 1] = 1
similarities[similarities < 0] = 0
return similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# generate partitions from Z
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
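    # Row k of the returned array encodes the partition of the nz
    # estimates into k+1 clusters (row 0: all estimates in one cluster,
    # last row: every estimate in its own cluster), with the cluster
    # labels of each row renumbered to 0, 1, ...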
def _z_to_partition(self, Z):
nz = Z.shape[0] + 1
C = np.zeros((nz, nz))
C[0, :] = np.arange(nz)
for ic in range(1, nz):
C[ic, :] = C[ic-1, :]
idx = (C[ic, :] == Z[ic-1, 0]) + (C[ic, :] == Z[ic-1, 1])
C[ic, idx == 1] = nz - 1 + ic
for ic in range(nz):
uniqC = np.unique(C[ic, :])
newidx = []
for elemC in C[ic, :]:
newidx = np.concatenate((newidx, (uniqC == elemC).nonzero()[0]))
C[ic, :] = newidx
idx = range(nz-1, -1, -1)
partition = C[idx, :]
return partition
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# compute cluster statistics
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
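    # Given a partition, 'internal_*' summarize the pairwise values
    # S[i, j] with i and j inside the same cluster, 'external_*' the
    # pairs crossing the cluster boundary, and 'between_*' hold the
    # min/avg/max of S for every pair of distinct clusters.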
def _clusterstat(self, S, partitions):
# number of clusters
Ncluster = int(np.max(partitions) + 1)
# initialize dictionary
stat = {'internal_sum': np.zeros(Ncluster) * np.NaN,
'internal_min': np.zeros(Ncluster) * np.NaN,
'internal_avg': np.zeros(Ncluster) * np.NaN,
'internal_max': np.zeros(Ncluster) * np.NaN,
'external_sum': np.zeros(Ncluster) * np.NaN,
'external_min': np.zeros(Ncluster) * np.NaN,
'external_avg': np.zeros(Ncluster) * np.NaN,
'external_max': np.zeros(Ncluster) * np.NaN,
'between_min': np.zeros((Ncluster, Ncluster)),
'between_avg': np.zeros((Ncluster, Ncluster)),
'between_max': np.zeros((Ncluster, Ncluster))}
for cluster in range(Ncluster):
thisPartition = np.where(partitions == cluster)[0]
nthisPartition = len(thisPartition)
S_ = np.zeros((nthisPartition, nthisPartition))
for i in range(nthisPartition):
S_[i, :] = S[thisPartition[i], thisPartition]
S_[range(nthisPartition), range(nthisPartition)] = np.NaN
S_ = S_[np.isfinite(S_)]
if len(S_) > 0:
stat['internal_sum'][cluster] = np.sum(S_)
stat['internal_min'][cluster] = np.min(S_)
stat['internal_avg'][cluster] = np.mean(S_)
stat['internal_max'][cluster] = np.max(S_)
if Ncluster > 1:
cthisPartition = np.where(partitions != cluster)[0]
ncthisPartition = len(cthisPartition)
S_ = np.zeros((nthisPartition, ncthisPartition))
for i in range(nthisPartition):
S_[i, :] = S[thisPartition[i], cthisPartition]
stat['external_sum'][cluster] = np.sum(S_)
stat['external_min'][cluster] = np.min(S_)
stat['external_avg'][cluster] = np.mean(S_)
stat['external_max'][cluster] = np.max(S_)
for i in range(Ncluster):
Pi = np.where(i == partitions)[0]
for j in range(i+1, Ncluster):
Pj = np.where(j == partitions)[0]
d_ = np.zeros((len(Pi), len(Pj)))
for iPi in range(len(Pi)):
d_[iPi, :] = S[Pi[iPi], Pj]
stat['between_min'][i, j] = np.min(d_)
stat['between_avg'][i, j] = np.mean(d_)
stat['between_max'][i, j] = np.max(d_)
stat['between_min'] += stat['between_min'].transpose()
stat['between_avg'] += stat['between_avg'].transpose()
stat['between_max'] += stat['between_max'].transpose()
return stat
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate the R-index as defined in
# Levine, E., Domany, E., 2001. 'Resampling method for
# unsupervised estimation of cluster validity'.
# Neural Comput. 13 (11), 2573-2593.
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
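    # As implemented below, with d_in(k) the average within-cluster
    # dissimilarity of cluster k and d_bt(k, l) the average dissimilarity
    # between clusters k and l, a partition into L clusters is scored by
    #
    #     R = (1/L) * sum_k [ d_in(k) / min_{l != k} d_bt(k, l) ]
    #
    # so smaller values indicate compact and well-separated clusters.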
def _rindex(self, dissimilarities, partitions, verbose=True):
nPart = partitions.shape[0]
# number of clusters in each partition
Ncluster = np.max(partitions, axis=1)
ri = np.zeros(nPart)
if verbose:
print ">>> Computing R-index..."
for k in range(nPart):
hist, bin_edges = np.histogram(partitions[k, :], bins=np.arange(1, Ncluster[k]+2))
if any(hist == 1):
# contains one-item clusters (index very doubtful)
ri[k] = np.NaN
elif Ncluster[k] == 0:
# degenerate partition (all in the same cluster)
ri[k] = np.NaN
else:
# compute cluster statistics
stat = self._clusterstat(dissimilarities, partitions[k, :])
between = stat['between_avg']
between[range(len(between)), range(len(between))] = np.Inf
internal = stat['internal_avg'].transpose()
ri[k] = np.mean(internal/np.min(between, axis=0))
return ri
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate clusters
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _cluster(self, verbose=True):
# ------------------------------------------
# compute dissimilarities
# ------------------------------------------
similarities = self._corrw()
dissimilarities = 1.0 - similarities
# ------------------------------------------
# generate partitions
# ------------------------------------------
Z, order = self._linkage(dissimilarities)
partitions = self._z_to_partition(Z)
# ------------------------------------------
# compute cluster validity
# ------------------------------------------
npc = int(self.W_est[0].shape[0])
indexR = self._rindex(dissimilarities, partitions[:npc, :], verbose=verbose)
return Z, order, partitions, indexR, dissimilarities, similarities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate curve that decreases from v0 to vn with a
# rate that is somewhere between linear and 1/t
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
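    # i.e. v(i) = v0 * (vn/v0)**(i/(t-1)) for i = 0, ..., t-1, a geometric
    # interpolation between v0 and vn. For example,
    # _potency_curve(1.0, 0.01, 5) gives approximately
    # [1.0, 0.316, 0.1, 0.0316, 0.01].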
def _potency_curve(self, v0, vn, t):
return v0 * ((1.0*vn/v0)**(np.arange(t)/(t-1.0)))
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# compute principal coordinates (using linear
# Metric Multi-Dimensional Scaling)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
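    # Classical (Torgerson) MDS as coded below: with the centering matrix
    # J = I - (1/n) * 1 1^T the squared dissimilarities D2 = D**2 are
    # double-centered, B = -0.5 * J * D2 * J, and the coordinates are
    # X = U * sqrt(S) taken from the SVD B = U S V^T.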
def _mmds(self, D):
nD = D.shape[0]
# square dissimilarities
D2 = D**2
# center matrix
Z = np.identity(nD) - np.ones((nD, nD))/(1.0 * nD)
# double centered inner product
B = -0.5 * np.dot(Z, np.dot(D2, Z))
# SVD
U, sing, V = np.linalg.svd(B)
# coordinates
X = np.dot(U, np.diag(np.sqrt(sing)))
return X
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# projects data vectors using Curvilinear Component
# Analysis
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
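    # One online CCA step, as coded below: for a randomly drawn unit i and
    # every other unit j with a known mutual distance, the projected point
    # y_j is updated according to
    #
    #     y_j <- y_j + alpha(t) * exp(-d_y(i,j)/lambda(t))
    #                * (d_x(i,j)/d_y(i,j) - 1) * (y_j - y_i)
    #
    # pulling the output distances d_y towards the input distances d_x,
    # while the neighbourhood radius lambda(t) and the learning rate
    # alpha(t) both shrink over the run (via _potency_curve).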
def _cca(self, D, P, epochs, Mdist, alpha0, lambda0):
# check input data
noc, dim = D.shape
noc_x_1 = np.zeros(noc, dtype=np.int)
me = np.zeros(dim)
st = np.zeros(dim)
# estimate mean and standard-deviation
for i in range(dim):
idx = np.where(np.isfinite(D[:, i]))[0]
me[i] = np.mean(D[idx, i])
st[i] = np.std(D[idx, i])
# replace unknown projections in initial
# projection with known values
inds = np.where(np.isnan(P))[0]
if len(inds):
P[inds] = np.random.rand(len(inds))
dummy, odim = P.shape
odim_x_1 = np.ones((odim, 1), dtype=np.int)
# training length
train_len = int(epochs * noc)
# random sample order
sample_inds = np.floor(noc * np.random.rand(train_len))
# mutual distances
nMdist = Mdist.shape[0]
if nMdist == 1:
Mdist = np.repeat(1, noc)
if nMdist != noc:
print ">>> ERROR: Mutual distance matrix size and data set size do not match!"
import pdb
pdb.set_trace()
# alpha and lambda
Alpha = self._potency_curve(alpha0, alpha0/100.0, train_len)
Lambda = self._potency_curve(lambda0, 0.01, train_len)
# action
for i in range(train_len):
ind = int(sample_inds[i]) # sample index
dx = Mdist[:, ind] # mutual distance in the input space
known = np.where(np.isfinite(dx))[0]
nknown = len(known)
if nknown > 0:
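                # CCA update step: compare the known input-space distances dx of
                # this sample with the current output-space distances dy and move
                # the neighbouring projections so that dy approaches dx; the
                # factor exp(-dy/Lambda[i]) limits the update to nearby points
                # and Alpha[i] acts as a decaying learning rate.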
y = P[ind, :].reshape(1, odim) # sample vector's projection
dyy = P[known, :] - y[noc_x_1[known], :]
dy = np.sqrt(np.dot(dyy**2, odim_x_1))
                dy[dy == 0] = 1.0  # to get rid of division by zero
fy = np.exp(-dy/Lambda[i]) * (dx[known].reshape(nknown, 1)/dy - 1.0)
P[known, :] += Alpha[i] * fy[:, np.zeros(odim, dtype=np.int)] * dyy
# set projections of totally unknown vectors as unknown
unknown = np.where(np.isnan(D))[0]
if len(unknown) > 0:
D_tmp = D.copy()
D_tmp[unknown] = 1
unknown = np.where(np.sum(D_tmp, axis=1) == dim)[0]
if len(unknown) > 0:
P[unknown, :] = np.NaN
return P
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# to project points on plane so that Euclidean distances
# between the projected points correspond to the
# similarity matrix between IC estimates
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _projection(self, dis, verbose=True):
# initialize some parameter
outputDim = 2 # we project onto a plane
alpha = 0.7
epochs = 75.0
radius = np.max([self.nrep/20.0, 10])
s2d = 'sqrtsim2dis'
# perform similarity-to-dissimilarity transformation
D = np.abs(np.sqrt(dis))
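        # 'sqrtsim2dis': taking the square root of the dissimilarities gives a
        # more distance-like quantity for the 2D projection below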
nD = D.shape[0]
if verbose:
print ">>> Perform projection to plane..."
# start from MMDS (linear Metric Multi-Dimensional Scaling)
init_proj = self._mmds(D)
init_proj = init_proj[:, :outputDim]
dummy = np.random.rand(nD, outputDim)
proj = self._cca(dummy, init_proj, epochs, D, alpha, radius)
return proj
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# to get the index of the component in the center
# of each cluster
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _idx2centrotypes(self, P, similarities, mode='partition'):
if mode == 'index':
nsim = len(P)
similarity = np.zeros((nsim, nsim))
for i in range(nsim):
similarity[i, :] = similarities[P[i], P]
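            # the centrotype is the estimate with the largest total similarity
            # to all other estimates of this cluster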
idx_one = np.argmax(np.sum(similarity, axis=0))
centro_idx = P[idx_one]
elif mode == 'partition':
Ncluster = int(np.max(P) + 1)
centro_idx = np.zeros(Ncluster, dtype=np.int)
for i in range(Ncluster):
idx = np.where(P == i)[0]
centro_idx[i] = self._idx2centrotypes(idx, similarities, mode='index')
else:
print ">>> ERROR: Unknown operation mode!"
import pdb
pdb.set_trace()
return centro_idx
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get optimal demixing matrix W
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _getW(self, centro_idx):
import types
nW = len(self.W_est)
npc, nchan = self.W_est[0].shape
npc = int(npc)
nchan = int(nchan)
if isinstance(self.W_est[0][0, 0], types.ComplexType):
allW = np.zeros((nW * npc, nchan), dtype=np.complex)
else:
allW = np.zeros((nW * npc, nchan))
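        # stack the demixing matrices of all repetitions row-wise, so that row
        # iw*npc + j holds component j of repetition iw; centro_idx then selects
        # the rows belonging to the cluster centrotypes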
for iw in range(nW):
allW[iw*npc:(iw+1)*npc, :] = self.W_est[iw]
return allW[centro_idx, :]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# method to estimate the quality of a cluster
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    def _cluster_quality(self, partition, similarities, mode='mean'):
        Ncluster = np.max(partition)
        stat = self._clusterstat(similarities, partition)
# compute score
if mode == 'minmax':
internal = stat['internal_min']
external = stat['external_max']
elif mode == 'mean':
internal = stat['internal_avg']
external = stat['external_avg']
else:
print ">>> ERROR: Unrecognized score function!"
import pdb
pdb.set_trace()
internal[np.isnan(internal)] = 0
external[np.isnan(external)] = 0
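        # stability index Iq: difference between the average within-cluster and
        # the average between-cluster similarity; values close to 1 indicate a
        # compact cluster that is well separated from the other clusters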
score = np.abs(internal - external)
return score
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# to compute the stability (quality) indices of the
# estimated clusters
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _stability(self, partitions, similarities, L=None):
# check input parameter
npc = int(self.W_est[0].shape[0])
        if L is None: L = npc - 1
Ncluster = range(L)
NofEstimates = np.zeros(L, dtype=np.int)
partition = partitions[L, :]
for i in Ncluster:
idx = np.where(partition == i)[0]
NofEstimates[i] = len(idx)
# compute cluster quality index
Iq = self._cluster_quality(partition, similarities, mode='mean')
return Iq
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get optimal (de-)mixing matrix
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_results(self, partitions, similarities, L=None, sort_results=True):
# check input parameter
npc = int(self.W_est[0].shape[0])
        if L is None: L = npc - 1
if L < 0 or L > npc:
print ">>> WARNING: Number of requested estimate clusters out of range!"
print ">>> Setting number of clusters to %d" % npc
L = npc
# get indices of ICs in the cluster centers
centro_idx = self._idx2centrotypes(partitions[L, :], similarities, mode='partition')
# get optimal demixing matrix
W = self._getW(centro_idx)
Iq = self._stability(partitions, similarities, L=L)
if sort_results:
idx_sort = np.argsort(Iq)[::-1]
Iq = Iq[idx_sort]
W = W[idx_sort, :]
A = np.linalg.pinv(W)
return A, W, Iq
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# prepare data for applying the fit routine
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def prepare_data_for_fit(self, fn_raw, stim_name=None,
stim_delay=0, tmin_stim=0.0, tmax_stim=1.0,
flow=4.0, fhigh=34.0,
event_id=1, resp_id=None, corr_event_picking=None,
hamming_data=True, remove_outliers=True,
fn_inv=None, contrast_id=[],
baseline=(None, None), averaged_epochs=False,
decim_epochs=False, interpolate_bads=True,
unfiltered=False, verbose=True):
'''
Routine to prepare the data for ICA application. Preparation
includes epoch generation, transformation to Fourier space
(if desired) and source localization applied to single
epochs.
Parameters
----------
fn_raw: filename of the input data (expect fif-file).
        stim_name: name of the stimulus channel. Note, for
            applying FourierICA data are chopped around stimulus
            onset. If not set, data are chopped into overlapping
            windows.
            default: stim_name=None
stim_delay: delay of stimulus presentation in milliseconds
default: stim_delay=0
tmin_stim: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default = 0.0
tmax_stim: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default = 1.0
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
            Note: here default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
event_id: ID of the event of interest to be considered in
the stimulus channel. Only of interest if 'stim_name'
is set
default: event_id=1
resp_id: Response ID for correct event estimation. Note:
Must be in the order corresponding to the 'event_id'
default: resp_id=None
corr_event_picking: if set should contain the complete python
path and name of the function used to identify only the
correct events
hamming_data: if set a hamming window is applied to each
epoch prior to Fourier transformation
default: hamming_data=True
        remove_outliers: If set, outliers are removed from the Fourier
            transformed data.
            Outliers are defined as windows with large log-average power (LAP)

                LAP_{c,t} = \log \sum_{f} |X_{c,tf}|^2

            where c, t and f are channels, window time-onsets and frequencies,
            respectively. The threshold is defined as |mean(LAP) + 3 std(LAP)|.
            This process can be bypassed or replaced by specifying a function
            handle as an optional parameter.
            default: remove_outliers=False
fn_inv: file name of inverse operator. If given
FourierICA is applied on data transformed to
source space
contrast_id: If set FourierICA is applied to contrast epochs
between events in event_id and events in contrast_id.
is set
default: contrast_id=[]
baseline: If set baseline correction is applied to epochs prior to
ICA estimation.
averaged_epochs: Should epochs be averaged before
FourierICA application? Note, averaged data require
less memory!
default: average=False
decim_epochs: if set the number of epochs will be reduced (per
subject) to that number for the estimation of the demixing matrix.
Note: the epochs were chosen randomly from the complete set of
epochs.
interpolate_bads: if set bad channels are interpolated (using the
mne routine raw.interpolate_bads()).
unfiltered: bool
If true data are not filtered to a certain frequency range when
Fourier transformation is applied
default: unfiltered=False
verbose: bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
Returns
-------
meg_data: array
            2D array containing the MEG data used for FourierICA estimation
src_loc_data: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel)
vertno: list
list containing two arrays with the order
of the vertices.
data_already_stft: boolean
'True' if data are transformed to Fourier space, otherwise
'False'
events: list
list containing the indices of all events used to generate the
epochs for applying FourierICA
sfreq: float
sampling frequency of the data
meg_channels: list
list containing the name of all MEG channels used for FourierICA
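
        Example
        -------
        A minimal usage sketch (the object name 'icasso' and the file name
        below are placeholders, not part of this routine)::

            (meg_data, src_loc_data, vertno, data_already_stft,
             events, sfreq, meg_channels) = icasso.prepare_data_for_fit(
                'subject01-raw.fif', stim_name='STI 014', event_id=1,
                tmin_stim=-0.2, tmax_stim=0.8, flow=4.0, fhigh=34.0)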
'''
# ------------------------------------------
# import necessary module
# ------------------------------------------
from fourier_ica import apply_stft, stft_source_localization
from mne import find_events, pick_types
from mne.io import Raw
# ------------------------------------------
# prepare data to apply FourierICA
# ------------------------------------------
meg_raw = Raw(fn_raw, preload=True)
# interpolate bad channels
if interpolate_bads:
meg_raw.interpolate_bads()
meg_channels = pick_types(meg_raw.info, meg=True, eeg=False,
eog=False, stim=False, exclude='bads')
meg_data = meg_raw._data[meg_channels, :]
sfreq = meg_raw.info['sfreq']
# check if ICASSO should be applied
# to evoked or resting state data
if stim_name:
events_all = find_events(meg_raw, stim_channel=stim_name, consecutive=True,
shortest_event=1)
# check if there is a stimulus delay
if stim_delay:
stim_delay_tsl = int(np.round(stim_delay * meg_raw.info['sfreq']/1000.0))
events_all[:, 0] += stim_delay_tsl
# check if only correct events should be chosen
if corr_event_picking:
if isinstance(corr_event_picking, basestring):
import importlib
mod_name, func_name = corr_event_picking.rsplit('.', 1)
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
resp_name = 'STI 013' if stim_name == 'STI 014' else 'STI 014'
response = find_events(meg_raw, stim_channel=resp_name, consecutive=True,
shortest_event=1)
if np.any(resp_id):
events_all, _ = func(events_all, response, sfreq, event_id, resp_id)
else:
events_all, _ = func(events_all, response, sfreq, event_id)
else:
print ">>> ERROR: 'corr_event_picking' should be a string containing the complete python"
print ">>> path and name of the function used to identify only the correct events!"
import pdb
pdb.set_trace()
if np.any(contrast_id):
contrast_events = events_all[events_all[:, 2] == contrast_id, 0]
if not isinstance(event_id, (list, tuple)):
event_id = [event_id]
for idx, event in enumerate(event_id):
if idx == 0:
events = events_all[events_all[:, 2] == event, :]
else:
events = np.concatenate((events, events_all[events_all[:, 2] == event, :]))
if not self.tICA:
events = events[:, 0]
else:
events = []
if self.tICA and not fn_inv:
print ">>> ERROR: For applying temporal ICA in source space the file name "
print " of the inverse operator is required!"
import pdb
pdb.set_trace()
# ------------------------------------------
# check if ICA should be applied in source
# space
# ------------------------------------------
if fn_inv:
# ------------------------------------------
# check if temporal ICA should be applied
# on data transformed to source space
# --> note: here data are not transformed
# to Fourier space
# ------------------------------------------
if self.tICA:
# -------------------------------------------
# check if all necessary parameters are set
# -------------------------------------------
if not stim_name:
print ">>> ERROR: For applying temporal ICA in source space a stimulus name is required!"
import pdb
pdb.set_trace()
# -------------------------------------------
# generate epochs around stimulus onset
# -------------------------------------------
from mne import Epochs
epoch_data = Epochs(meg_raw, events, event_id,
tmin_stim, tmax_stim,
picks=meg_channels, baseline=baseline,
proj=False, verbose=False)
if averaged_epochs:
X = epoch_data.average().data.transpose()
X = X.reshape([X.shape[0], 1, X.shape[1]])
else:
X = epoch_data.get_data().transpose([2, 0, 1])
# ------------------------------------------
# FourierICA is applied on data transformed
# to source space
# ------------------------------------------
else:
# -------------------------------------------
# transform data to STFT
# -------------------------------------------
# print out some information
if verbose:
print ">>> transform data to Fourier space..."
win_length_sec = tmax_stim - tmin_stim
X, _ = apply_stft(meg_data, events=events, tpre=tmin_stim,
sfreq=sfreq, flow=flow, fhigh=fhigh,
win_length_sec=win_length_sec,
hamming_data=hamming_data,
remove_outliers=remove_outliers,
baseline=baseline,
decim_epochs=decim_epochs,
unfiltered=unfiltered,
verbose=verbose)
if np.any(contrast_id):
X_contrast, _ = apply_stft(meg_data, events=contrast_events,
tpre=tmin_stim, sfreq=sfreq,
flow=flow, fhigh=fhigh,
win_length_sec=win_length_sec,
hamming_data=hamming_data,
remove_outliers=remove_outliers,
baseline=baseline,
decim_epochs=decim_epochs,
verbose=verbose)
# -------------------------------------------
# perform source localization
# -------------------------------------------
# print out some information
if verbose:
print ">>> estimate inverse solution..."
src_loc_data, vertno = stft_source_localization(X, fn_inv,
method=self.src_loc_method,
morph2fsaverage=self.morph2fsaverage,
snr=self.snr)
if np.any(contrast_id):
src_loc_data_contrast, _ = stft_source_localization(X_contrast, fn_inv,
method=self.src_loc_method,
morph2fsaverage=self.morph2fsaverage,
snr=self.snr)
del _
n_epochs = np.min([src_loc_data.shape[1], src_loc_data_contrast.shape[1]])
events = events[:n_epochs]
src_loc_data = src_loc_data[:, :n_epochs, :] - src_loc_data_contrast[:, :n_epochs, :]
data_already_stft = True
meg_data = X
# ------------------------------------------
# FourierICA would be applied on
# data in the sensor space
# ------------------------------------------
else:
data_already_stft = False
vertno = None
src_loc_data = None
return meg_data, src_loc_data, vertno, data_already_stft, events, sfreq, meg_channels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # perform ICASSO based temporal ICA signal decomposition
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def fit_tICA(self, ica_data, verbose=True):
# ------------------------------------------
# import necessary module
# ------------------------------------------
from ica import ica_array
from scipy.linalg import pinv
# ------------------------------------------
# print out some information
# ------------------------------------------
if verbose:
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
print ">>> Performing %s estimation" % self.ica_method
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
# ------------------------------------------
# initialize some data
# ------------------------------------------
pca = None
# ------------------------------------------
# perform ICASSO ICA
# ------------------------------------------
for irep in range(self.nrep):
weights, pca, activations = ica_array(ica_data,
overwrite=None, pca=pca,
max_pca_components=self.pca_dim,
method=self.ica_method,
cost_func=self.cost_function,
weights=None, lrate=self.lrate,
wchange=self.conv_eps,
maxsteps=self.max_iter,
verbose=verbose)
if irep == 0:
self.whitenMat = pca.components_
self.dewhitenMat = pinv(pca.components_)
self.dmean = pca.mean_
self.dstd = pca.stddev_
# save results in structure
W_orig = np.dot(weights, self.whitenMat)
A_orig = np.dot(self.dewhitenMat, pinv(weights))
self.W_est.append(W_orig)
self.A_est.append(A_orig)
# print out some information
if verbose and self.nrep > 1:
print ">>> Running %s number %d of %d done" % (self.ica_method, irep+1, self.nrep)
if irep == 0:
print "..... %s parameter:" % self.ica_method
print "....."
print "..... Stopping threshold: %d" % self.conv_eps
print "..... Maximal number of iterations: %d" % self.max_iter
print "..... Learning rate: %d" % self.lrate
print "..... Number of independent components: %d" % self.pca_dim
print "....."
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # perform ICASSO based FourierICA signal decomposition
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _fit_FourierICA(self, ica_data, events, sfreq,
complex_mixing=True, hamming_data=False,
remove_outliers=False, envelopeICA=False,
normalized=True, data_already_stft=False,
verbose=True):
# ------------------------------------------
# import necessary module
# ------------------------------------------
from fourier_ica import JuMEG_fourier_ica
# ------------------------------------------
# generate FourierICA object
# ------------------------------------------
if verbose:
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
print ">>> Performing FourierICA estimation"
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
win_length_sec = self.tmax_win - self.tmin_win
fourier_ica_obj = JuMEG_fourier_ica(events=events, tpre=self.tmin_win,
flow=self.flow, fhigh=self.fhigh,
sfreq=sfreq,
win_length_sec=win_length_sec,
remove_outliers=remove_outliers,
hamming_data=hamming_data,
complex_mixing=complex_mixing,
pca_dim=self.pca_dim,
max_iter=self.max_iter,
conv_eps=self.conv_eps,
cost_function=self.cost_function,
envelopeICA=envelopeICA,
lrate=self.lrate,
decim_epochs=self.decim_epochs)
# ------------------------------------------
# initialize some data
# ------------------------------------------
whitenMat = []
dewhitenMat = []
# ------------------------------------------
# perform ICASSO ICA
# ------------------------------------------
for irep in range(self.nrep):
# apply FourierICA
if self.nrep == 1:
verbose_fourierICA = verbose
else:
verbose_fourierICA = False
W_orig, A_orig, S_FT, Smean, Sstddev, objective, whitenMat, \
dewhitenMat = fourier_ica_obj.fit(ica_data, whiten_mat=whitenMat,
dewhiten_mat=dewhitenMat,
data_already_stft=data_already_stft,
data_already_normalized=normalized,
verbose=verbose_fourierICA)
if irep == 0:
self.whitenMat = whitenMat
self.dewhitenMat = dewhitenMat
self.dmean = Smean
self.dstd = Sstddev
# save results in structure
self.W_est.append(W_orig)
self.A_est.append(A_orig)
# print out some information
if verbose and self.nrep > 1:
print ">>> Running FourierICA number %d of %d done" % (irep+1, self.nrep)
if irep == 0:
str_hamming_window = "True" if fourier_ica_obj.hamming_data else "False"
str_complex_mixing = "True" if fourier_ica_obj.complex_mixing else "False"
print "..... Fourier ICA parameter:"
print "....."
print "..... Sampling frequency set to: %d" % fourier_ica_obj.sfreq
print "..... Start of frequency band set to: %d" % fourier_ica_obj.flow
print "..... End of frequency band set to: %d" % fourier_ica_obj.fhigh
print "..... Using hamming window: %s" % str_hamming_window
print "..... Assume complex mixing: %s" % str_complex_mixing
print "..... Number of independent components: %d" % fourier_ica_obj.ica_dim
print "....."
return fourier_ica_obj
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# perform ICASSO based ICA signal decomposition
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def fit(self, fn_raw, ica_method=None, average=False, stim_name=None,
event_id=None, stim_delay=0, corr_event_picking=None,
tmin_win=None, tmax_win=None, flow=None, fhigh=None,
dim_reduction=None, pca_dim=None,
max_iter=None, conv_eps=None, complex_mixing=True,
hamming_data=False, remove_outliers=False,
envelopeICA=False, fn_inv=None, cost_function=None,
contrast_id=[], baseline=(None, None),
decim_epochs=False, interpolate_bads=True, verbose=True):
"""
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly different conditions and
clustering the obtained components. Note, here as default
FourierICA is applied.
Parameters
----------
fn_raw: filename of the input data (expect fif-file).
        ica_method: String containing the information which ICA
method should be applied. You can choose between
'extended-infomax', 'fastica', 'infomax' and
'fourierica'
default: ica_method='fourierica'
average: Should data be averaged across subjects before
FourierICA application? Note, averaged data require
less memory!
default: average=False
        stim_name: name of the stimulus channel. Note, for
            applying FourierICA data are chopped around stimulus
            onset. If not set, data are chopped into overlapping
            windows.
            default: stim_name=None
event_id: Id of the event of interest to be considered in
the stimulus channel. Only of interest if 'stim_name'
is set
default: event_id=1
stim_delay: delay of stimulus presentation in milliseconds
default: stim_delay=0
corr_event_picking: if set should contain the complete python
path and name of the function used to identify only the
correct events
tmin_win: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default = 0.0
tmax_win: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default = 1.0
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
            Note: here default flow and fhigh are chosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
dim_reduction: {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
Method for dimension selection. For further information about
the methods please check the script 'dimension_selection.py'.
pca_dim: The number of PCA components used to apply FourierICA.
If pca_dim > 1 this refers to the exact number of components.
If between 0 and 1 pca_dim refers to the variance which
should be explained by the chosen components
default: pca_dim=0.9
        max_iter: maximum number of iterations used in FourierICA
default: max_iter=2000
conv_eps: iteration stops when weight changes are smaller
then this number
default: conv_eps = 1e-9
complex_mixing: if mixing matrix should be real or complex
default: complex_mixing=True
hamming_data: if set a hamming window is applied to each
epoch prior to Fourier transformation
default: hamming_data=False
        remove_outliers: If set, outliers are removed from the Fourier
            transformed data.
            Outliers are defined as windows with large log-average power (LAP)

                LAP_{c,t} = \log \sum_{f} |X_{c,tf}|^2

            where c, t and f are channels, window time-onsets and frequencies,
            respectively. The threshold is defined as |mean(LAP) + 3 std(LAP)|.
            This process can be bypassed or replaced by specifying a function
            handle as an optional parameter.
            default: remove_outliers=False
envelopeICA: if set ICA is estimated on the envelope
of the Fourier transformed input data, i.e., the
mixing model is |x|=As
default: envelopeICA=False
fn_inv: file name of inverse operator. If given
FourierICA is applied on data transformed to
source space
cost_function: which cost-function should be used in the complex
ICA algorithm
'g1': g_1(y) = 1 / (2 * np.sqrt(lrate + y))
'g2': g_2(y) = 1 / (lrate + y)
'g3': g_3(y) = y
contrast_id: If set FourierICA is applied to contrast epochs
between events in event_id and events in contrast_id.
is set
default: contrast_id=[]
baseline: If set baseline correction is applied to epochs prior to
ICA estimation.
decim_epochs: if set the number of epochs will be reduced (per
subject) to that number for the estimation of the demixing matrix.
Note: the epochs were chosen randomly from the complete set of
epochs.
interpolate_bads: if set bad channels are interpolated (using the
mne routine raw.interpolate_bads()).
verbose: bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
Returns
-------
W: estimated optimal de-mixing matrix
A: estimated mixing matrix
Iq: quality index of the clustering between
components belonging to one cluster
(between 0 and 1; 1 refers to small clusters,
            i.e., components in one cluster are highly similar)
fourier_ica_obj: FourierICA object. For further information
please have a look into the FourierICA routine
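
        Example
        -------
        A minimal usage sketch (the object name 'icasso' and the file name
        below are placeholders)::

            W, A, Iq, fourier_ica_obj = icasso.fit('subject01-raw.fif',
                                                   ica_method='fourierica',
                                                   stim_name='STI 014',
                                                   event_id=1,
                                                   tmin_win=-0.2,
                                                   tmax_win=0.8,
                                                   flow=4.0, fhigh=34.0)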
"""
# ------------------------------------------
# import necessary module
# ------------------------------------------
from mne import set_log_level
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# check input parameter
# ------------------------------------------
if ica_method:
self.ica_method = ica_method
if average:
self.average = average
if fn_inv:
self.fn_inv = fn_inv
if cost_function:
self.cost_function = cost_function
if dim_reduction:
self.dim_reduction = dim_reduction
if pca_dim:
self.pca_dim = pca_dim
if stim_name:
self.stim_name = stim_name
if event_id:
self.event_id = event_id
if tmin_win:
self.tmin_win = tmin_win
if tmax_win:
self.tmax_win = tmax_win
if flow:
self.flow = flow
if fhigh:
self.fhigh = fhigh
if max_iter:
self.max_iter = max_iter
if conv_eps:
self.conv_eps = conv_eps
if decim_epochs:
self.decim_epochs = decim_epochs
# ------------------------------------------
# check which ICA algorithm should be
# applied
# ------------------------------------------
if self.ica_method in ['extended-infomax', 'fastica', 'infomax']:
self.tICA = True
if not self.cost_function in ['logcosh', 'exp', 'cube']:
self.cost_function = 'logcosh'
elif self.ica_method == 'fourierica':
self.tICA = False
else:
print 'WARNING: chosen ICA method does not exist!'
            print 'Program stops!'
import pdb
pdb.set_trace()
# ------------------------------------------
# prepare data to apply ICASSO
# ------------------------------------------
# check if fn_raw is a list, i.e., group FourierICA
# should be applied
if isinstance(fn_raw, list):
# test if FourierICA should be applied
if self.ica_method != 'fourierica':
print ">>> NOTE: When using temporal group ICA it is recommended " \
"to use ICA based on averaged datasets"
print ">>> Parameters are set for group ICA!"
average_epochs = True
self.average = False
else:
average_epochs = False
# loop over all files
for idx, fnraw in enumerate(fn_raw):
meg_data_cur, src_loc, vert, data_already_stft, events, sfreq, picks = \
self.prepare_data_for_fit(fnraw, stim_name=self.stim_name,
tmin_stim=self.tmin_win, tmax_stim=self.tmax_win,
flow=self.flow, fhigh=self.fhigh, event_id=self.event_id,
corr_event_picking=corr_event_picking, stim_delay=stim_delay,
fn_inv=self.fn_inv[idx], hamming_data=hamming_data,
remove_outliers=remove_outliers,
contrast_id=contrast_id, baseline=baseline,
averaged_epochs=average_epochs,
decim_epochs=self.decim_epochs,
interpolate_bads=interpolate_bads,
verbose=verbose)
# normalize source data
fftsize, nwindows, nvoxel = src_loc.shape
nrows_Xmat_c = fftsize*nwindows
src_loc = src_loc.reshape((nrows_Xmat_c, nvoxel), order='F')
dmean = np.mean(src_loc, axis=0)
dstddev = np.std(src_loc, axis=0)
# ---------------------------------
# save all data in one matrix
# ---------------------------------
if self.average:
if self.ica_method == 'fourierica':
if idx == 0:
nfn_raw = len(fn_raw)
src_loc_data = np.zeros((nrows_Xmat_c, nvoxel), dtype=np.complex)
meg_data = np.zeros((fftsize, nwindows, 248), dtype=np.complex)
nwindows_min = nwindows
# check if result arrays must be reduced
if nwindows_min > nwindows:
nwindows_min = nwindows
src_loc_data = src_loc_data[:(nwindows_min*fftsize), :]
meg_data = meg_data[:, :nwindows_min, :]
src_loc_data += (src_loc[:(nwindows_min*fftsize), :] - dmean[np.newaxis, :]) / \
(dstddev[np.newaxis, :]*nfn_raw)
meg_data[:, :, picks] += (meg_data_cur[:, :nwindows_min, :]/nfn_raw)
else:
if idx == 0:
nfn_raw = len(fn_raw)
src_loc_data = np.zeros((nfn_raw*nrows_Xmat_c, nvoxel), dtype=np.complex)
meg_data = np.zeros((fftsize, nfn_raw*nwindows, 248), dtype=np.complex)
src_loc_data[(idx*nrows_Xmat_c):((idx+1)*nrows_Xmat_c), :] = \
(src_loc - dmean[np.newaxis, :]) / dstddev[np.newaxis, :]
meg_data[:, (idx*nwindows):((idx+1)*nwindows), picks] = meg_data_cur
# ---------------------------------
                # free some memory
# ---------------------------------
del meg_data_cur, src_loc, dmean, dstddev
normalized = True
else:
meg_data, src_loc_data, vertno, data_already_stft, events, sfreq, picks = \
self.prepare_data_for_fit(fn_raw, stim_name=self.stim_name,
tmin_stim=self.tmin_win, tmax_stim=self.tmax_win,
flow=self.flow, fhigh=self.fhigh, event_id=self.event_id,
stim_delay=stim_delay, corr_event_picking=corr_event_picking,
fn_inv=self.fn_inv, hamming_data=hamming_data,
remove_outliers=remove_outliers, baseline=baseline,
decim_epochs=self.decim_epochs, interpolate_bads=interpolate_bads,
verbose=verbose)
normalized = False
self._sfreq = sfreq
# ------------------------------------------
# check if PCA dimension is set...if not
# use MIBS to estimate the dimension
# ------------------------------------------
if not self.pca_dim:
# import some modules
from .complex_ica import cov
from scipy.linalg import eigh
from .dimension_selection import aic_mdl, mibs, gap
# concatenate STFT for consecutive windows in each channel
fftsize, nwindows, nchan = meg_data.shape
nrows_Xmat_c = fftsize*nwindows
Xmat_c = meg_data.reshape((nrows_Xmat_c, nchan), order='F')
covmat = cov(Xmat_c, rowvar=0)
Dc, Ec = eigh(covmat.real)
idx_sort = np.argsort(Dc.real)[::-1]
Dc = Dc[idx_sort].real
ntsl = Xmat_c.shape[0]
if self.dim_reduction == 'AIC':
pca_dim, _ = aic_mdl(Dc)
elif self.dim_reduction == 'BIC':
pca_dim = mibs(Dc, ntsl, use_bic=True)
elif self.dim_reduction == 'MIBS':
pca_dim = mibs(Dc, ntsl)
elif self.dim_reduction =='GAP':
pca_dim = gap(Dc)
else: # self.dim_reduction == 'MDL'
_, pca_dim = aic_mdl(Dc)
if pca_dim > 60:
print "WARNING: You have %d PCA components!" % (pca_dim)
print "Using now explained variance..."
explVar = np.abs(Dc.copy())
explVar /= explVar.sum()
pca_dim = np.sum(explVar.cumsum() <= 0.9) + 1
print "Dimension is now: %d components!" % (pca_dim)
self.pca_dim = pca_dim
del Xmat_c, covmat, Ec, idx_sort, Dc, ntsl, _
# ------------------------------------------
# check if ICA should be applied in sensor
# or source space
# ------------------------------------------
if self.fn_inv:
ica_data = src_loc_data
else:
ica_data = meg_data.copy()
# ------------------------------------------
# check which ICA algorithm should be
# applied
# ------------------------------------------
if self.ica_method in ['extended-infomax', 'fastica', 'infomax']:
self.fit_tICA(ica_data.real.T, verbose=verbose)
fourier_ica_obj = None
elif self.ica_method == 'fourierica':
fourier_ica_obj = self._fit_FourierICA(ica_data, events, sfreq,
complex_mixing=complex_mixing,
hamming_data=hamming_data,
remove_outliers=remove_outliers,
envelopeICA=envelopeICA,
normalized=normalized,
data_already_stft=data_already_stft,
verbose=verbose)
else:
print 'WARNING: chosen ICA method does not exist!'
            print 'Program stops!'
import pdb
pdb.set_trace()
# ------------------------------------------
# perform cluster analysis
# ------------------------------------------
if self.nrep == 1:
if verbose:
print ">>>"
print ">>> No clustering required as only one ICASSO repetition was performed..."
W = self.W_est[0]
A = self.A_est[0]
Iq = np.zeros(W.shape[0])
else:
if verbose:
print ">>>"
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
print ">>> Performing cluster analysis <<<"
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
Z, order, partitions, indexR, dis, sim = self._cluster()
proj = self._projection(dis)
A, W, Iq = self._get_results(partitions, sim)
# ------------------------------------------
# return results
# ------------------------------------------
return W, A, Iq, fourier_ica_obj
| bsd-3-clause |
loli/semisupervisedforests | sklearn/cluster/__init__.py | 12 | 1331 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, Ward, WardAgglomeration,
AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'Ward',
'WardAgglomeration',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
CforED/Machine-Learning | sklearn/tests/test_common.py | 26 | 8388 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check, name, Estimator
else:
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
# of n_iter atleast 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check_transformer_n_iter, name, estimator
else:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# The ProjectedGradientNMF class is deprecated
if issubclass(Estimator, ProjectedGradientNMF):
with ignore_warnings():
yield check_get_params_invariance, name, Estimator
else:
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/neural_networks/plot_mnist_filters.py | 76 | 2189 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| gpl-3.0 |
loli/semisupervisedforests | examples/ensemble/plot_gradient_boosting_regularization.py | 352 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
bellwethers-in-se/defects | src/old/methods1.py | 1 | 2845 | #! /Users/rkrsn/anaconda/bin/python
import sys
import os
from os import getcwd
from os import walk
from os.path import expanduser
# Update PYTHONPATH
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from pdb import set_trace
HOME = expanduser('~')
axe = root + '/lib/axe/axe/' # AXE
pystat = root + '/lib/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, './where2'])
from dtree import *
from table import *
# from w2 import *
from w2 import where2, prepare, leaves
from makeAmodel import makeAModel
# import matplotlib.pyplot as plt
def toPandas():
import pandas as pd
def explore(dir):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
def newTable(tbl, headerLabel, Rows):
tbl2 = clone(tbl)
newHead = Sym()
newHead.col = len(tbl.headers)
newHead.name = headerLabel
tbl2.headers = tbl.headers + [newHead]
return clone(tbl2, rows=Rows)
def createTbl(
data,
settings=None,
_smote=False,
isBin=False,
bugThres=1,
duplicate=False):
"""
kwargs:
_smote = True/False : SMOTE input data (or not)
_isBin = True/False : Reduce bugs to defects/no defects
_bugThres = int : Threshold for marking stuff as defective,
default = 1. Not defective => Bugs < 1
"""
makeaModel = makeAModel()
_r = []
for t in data:
m = makeaModel.csv2py(t, _smote=_smote, duplicate=duplicate)
_r += m._rows
m._rows = _r
# Initialize all parameters for where2 to run
prepare(m, settings=None)
tree = where2(m, m._rows) # Decision tree using where2
tbl = table(t)
headerLabel = '=klass'
Rows = []
for k, _ in leaves(tree): # for k, _ in leaves(tree):
for j in k.val:
tmp = j.cells
if isBin:
tmp[-1] = 0 if tmp[-1] < bugThres else 1
tmp.append('_' + str(id(k) % 1000))
j.__dict__.update({'cells': tmp})
Rows.append(j.cells)
return newTable(tbl, headerLabel, Rows)
def test_createTbl():
dir = '../Data/camel/camel-1.6.csv'
newTbl = createTbl([dir], _smote=False)
newTblSMOTE = createTbl([dir], _smote=True)
print(len(newTbl._rows), len(newTblSMOTE._rows))
def drop(test, tree):
loc = apex(test, tree)
return loc
if __name__ == '__main__':
test_createTbl()
| mit |
GiulioGx/RNNs | sources/metrics/ErrorMonitor.py | 1 | 1102 | from infos.Info import Info
from infos.InfoElement import PrintableInfoElement
from infos.InfoGroup import InfoGroup
from infos.InfoList import InfoList
from metrics.RealValuedMonitor import RealValuedMonitor
from datasets.Dataset import Dataset
class ErrorMonitor(RealValuedMonitor):
def __init__(self, dataset: Dataset, error_fnc):
super().__init__(100)
self.__dataset = dataset
self.__best_error = 100
self.__error_fnc = error_fnc
def update(self, measures: list):
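        # average the error over the measured batches and keep track of the
        # best (lowest) value seen so far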
new_value = RealValuedMonitor.mean(measures, 0)
if new_value < self.__best_error:
self.__best_error = new_value
self._current_value = new_value
def get_symbols(self, y, t, mask) -> list:
return [self.__error_fnc(y=y, t=t, mask=mask)] # XXX
@property
def info(self) -> Info:
error_info = PrintableInfoElement('curr', ':.2%', self._current_value)
best_info = PrintableInfoElement('best', ':.2%', self.__best_error)
error_group = InfoGroup('error', InfoList(error_info, best_info))
return error_group
| lgpl-3.0 |
janezhango/BigDataMachineLearning | py/testdir_0xdata_only/test_from_hdfs_hosts.py | 2 | 4197 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3,
use_hdfs=True, hdfs_version='cdh3', hdfs_name_node='192.168.1.176')
else:
h2o_hosts.build_cloud_with_hosts(
use_hdfs=True, hdfs_version='cdh3', hdfs_name_node='192.168.1.176')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_B_hdfs_files(self):
# larger set in my local dir
# fails because classes aren't integers
# "allstate_claim_prediction_train_set.zip",
csvFilenameAll = [
"3G_poker_shuffle",
"TEST-poker1000.csv",
# corrupt zip file?
# "allstate_claim_prediction_train_set.zip",
"and-testing.data",
"arcene2_train.both",
"arcene_train.both",
"bestbuy_test.csv",
"bestbuy_train.csv",
"billion_rows.csv.gz",
"covtype.13x.data",
"covtype.13x.shuffle.data",
"covtype.169x.data",
"covtype.4x.shuffle.data",
"covtype.data",
"covtype4x.shuffle.data",
"hhp.unbalanced.012.1x11.data.gz",
"hhp.unbalanced.012.data.gz",
"hhp.unbalanced.data.gz",
"hhp2.os.noisy.0_1.data",
"hhp2.os.noisy.9_4.data",
"hhp_9_14_12.data",
"leads.csv",
"prostate_long_1G.csv",
]
# pick 8 randomly!
if (1==0):
csvFilenameList = random.sample(csvFilenameAll,8)
# Alternatively: do the list in order! Note the order is easy to hard
else:
csvFilenameList = csvFilenameAll
# pop open a browser on the cloud
h2b.browseTheCloud()
timeoutSecs = 1000
# save the first, for all comparisions, to avoid slow drift with each iteration
firstglm = {}
for csvFilename in csvFilenameList:
# creates csvFilename.hex from file in hdfs dir
start = time.time()
print 'Parsing', csvFilename
csvPathname = "datasets/" + csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs',
timeoutSecs=timeoutSecs, retryDelaySecs=1.0)
print csvFilename, '\nparse time (python)', time.time() - start, 'seconds'
print csvFilename, '\nparse time (h2o):', parseResult['response']['time']
### print h2o.dump_json(parseResult['response'])
print "parse result:", parseResult['destination_key']
# I use this if i want the larger set in my localdir
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
### print h2o.dump_json(inspect)
cols = inspect['cols']
# look for nonzero num_missing_values count in each col
for i, colDict in enumerate(cols):
num_missing_values = colDict['num_missing_values']
if num_missing_values != 0:
### print "%s: col: %d, num_missing_values: %d" % (csvFilename, i, num_missing_values)
pass
### print h2o.dump_json(cols[0])
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
row_size = inspect['row_size']
ptype = inspect['type']
value_size_bytes = inspect['value_size_bytes']
response = inspect['response']
ptime = response['time']
print "num_cols: %s, num_rows: %s, row_size: %s, ptype: %s, \
value_size_bytes: %s, response: %s, time: %s" % \
(num_cols, num_rows, row_size, ptype, value_size_bytes, response, ptime)
# h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
print "\n" + csvFilename
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
CforED/Machine-Learning | sklearn/decomposition/__init__.py | 74 | 1490 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
nelson-liu/scikit-learn | examples/ensemble/plot_forest_iris.py | 332 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
        # to show how these differ from what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
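# ---------------------------------------------------------------------------
# Optional sketch (not part of the original example): reproduce the 10-fold
# cross-validation comparison quoted in the module docstring, this time on all
# 4 iris features. Assumes scikit-learn >= 0.18 so that cross_val_score is
# importable from sklearn.model_selection.
from sklearn.model_selection import cross_val_score

for model in models:
    cv_scores = cross_val_score(clone(model), iris.data, iris.target, cv=10)
    model_name = str(type(model)).split(".")[-1][:-2]
    print("%s: %.2f (+/- %.2f) mean 10-fold CV accuracy"
          % (model_name, cv_scores.mean(), cv_scores.std()))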
| bsd-3-clause |
nelson-liu/scikit-learn | examples/applications/plot_prediction_latency.py | 83 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
    # Hack to detect whether we are being run by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
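# Example (not part of the original script): for an already fitted estimator
# ``est`` and a test matrix ``X_test``, ``benchmark_estimator(est, X_test)``
# returns two arrays of per-instance latencies in seconds (atomic and bulk);
# a standalone sanity check along these lines is appended at the end of this
# file.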
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
    runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict describing the benchmark; its 'estimators' entry is
        used to label each box with the estimator name and complexity
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
espg/scikit-learn | sklearn/linear_model/tests/test_huber.py | 17 | 7463 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, LinearRegression, SGDRegressor, Ridge
from sklearn.linear_model._huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05
)
    # Replace 10% of the samples with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression()
lr.fit(X, y)
huber = HuberRegressor(epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_max_iter():
X, y = make_regression_with_outliers()
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
def loss_func(x, *args):
return _huber_loss_and_gradient(x, *args)[0]
def grad_func(x, *args):
return _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight
)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test the sample_weight implementation in HuberRegressor.
X, y = make_regression_with_outliers()
huber = HuberRegressor()
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make
# sure that the number of decimal places used is somewhat insensitive to
# the amplitude of the coefficients and therefore to the scale of the
# data and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor()
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
# Test that outliers filtering is scaling independent.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert not np.all(n_outliers_mask_1)
huber.fit(X, 2.0 * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2.0 * X, 2.0 * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
    # Test that Huber and SGD converge to the same coefficients for the
    # same parameters.
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0,
loss="huber",
shuffle=True,
random_state=0,
max_iter=10000,
fit_intercept=False,
epsilon=1.35,
tol=None,
)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
    # these would be almost the same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
    # Test that Huber gives a better R^2 score than Ridge on the non-outliers.
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.01)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber
# regressor.
ridge = Ridge(alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert huber_score > ridge_score
# The huber model should also fit poorly on the outliers.
assert ridge_outlier_score > huber_outlier_score
def test_huber_bool():
# Test that it does not crash with bool data
X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
X_bool = X > 0
HuberRegressor().fit(X_bool, y)
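def test_huber_outliers_mask_shape():
    # Extra sketch (not part of the original test suite): after fitting,
    # ``outliers_`` should be a boolean mask with one entry per sample.
    X, y = make_regression_with_outliers()
    huber = HuberRegressor().fit(X, y)
    assert huber.outliers_.shape == (X.shape[0],)
    assert huber.outliers_.dtype == bool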
| bsd-3-clause |
espg/scikit-learn | sklearn/ensemble/__init__.py | 17 | 1502 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from ._base import BaseEnsemble
from ._forest import RandomForestClassifier
from ._forest import RandomForestRegressor
from ._forest import RandomTreesEmbedding
from ._forest import ExtraTreesClassifier
from ._forest import ExtraTreesRegressor
from ._bagging import BaggingClassifier
from ._bagging import BaggingRegressor
from ._iforest import IsolationForest
from ._weight_boosting import AdaBoostClassifier
from ._weight_boosting import AdaBoostRegressor
from ._gb import GradientBoostingClassifier
from ._gb import GradientBoostingRegressor
from ._voting import VotingClassifier
from ._voting import VotingRegressor
from ._stacking import StackingClassifier
from ._stacking import StackingRegressor
from ._hist_gradient_boosting.gradient_boosting import (
HistGradientBoostingRegressor,
HistGradientBoostingClassifier,
)
__all__ = [
"BaseEnsemble",
"RandomForestClassifier",
"RandomForestRegressor",
"RandomTreesEmbedding",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"BaggingClassifier",
"BaggingRegressor",
"IsolationForest",
"GradientBoostingClassifier",
"GradientBoostingRegressor",
"AdaBoostClassifier",
"AdaBoostRegressor",
"VotingClassifier",
"VotingRegressor",
"StackingClassifier",
"StackingRegressor",
"HistGradientBoostingClassifier",
"HistGradientBoostingRegressor",
]
| bsd-3-clause |
czhuang/ChordRipple | chord2vec/plot_utilities.py | 1 | 17153 |
from copy import copy
import numpy as np
import pylab as plt
from pylab import setp
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from music_theory_tools import RELATIVE_MINOR, CIRCLE_OF_FIFTHS_MINOR_DICT, CIRCLE_OF_FIFTHS_MAJOR_DICT
from load_songs_tools import get_raw_data, get_configs
def plot_bach_dist():
configs = get_configs()
print configs
configs['corpus'] = 'bach'
configs['min_count'] = 1
plot_dist(configs)
def plot_rock_nontransposed_dist():
configs = get_configs()
configs['corpus'] = 'rock'
configs['min_count'] = 1
configs['transposed'] = False
plot_dist(configs)
def plot_rock_transposed_dist():
configs = get_configs()
configs['corpus'] = 'rock'
configs['min_count'] = 1
configs['transposed'] = True
plot_dist(configs)
def plot_dist(configs):
from make_model_tools import make_Ngram
ngram = make_Ngram(configs)
counts, syms = ngram.get_sorted_counts()
print "---top 5---"
for i in range(5):
print syms[i], counts[i]
print "\n---bottom 5---"
for i in range(1, 20):
print syms[-i], counts[-i]
corpus = configs['corpus']
plt.figure(figsize=(10, 6))
plt.bar(range(len(counts)), counts)
plt.title('Sorted chord counts in %s corpus' % corpus, fontsize=16)
plt.ylabel('Chord counts', fontsize=14)
plt.xlabel('Chords indexed by descending counts', fontsize=14)
setp(plt.gca().get_xticklabels(), fontsize=12)
    setp(plt.gca().get_yticklabels(), fontsize=10)
# plt.savefig('%s-counts.pdf' % configs['corpus'])
plt.savefig('%s-counts.png' % configs['corpus'])
def plot_rock_mat():
configs = get_configs()
configs['corpus'] = 'rock'
configs['min_count'] = 1
configs['transposed'] = True
plot_mat_wrapper(configs)
def plot_bach_mat():
configs = get_configs()
configs['corpus'] = 'bach'
configs['min_count'] = 5
plot_mat_wrapper(configs)
def plot_mat_wrapper(configs):
from make_model_tools import make_Ngram
ngram = make_Ngram(configs)
counts, _ = ngram.get_sorted_counts()
# bigram = ngram.ngram_counts
bigram = ngram.ngram
syms = ngram.syms
print bigram.shape, len(syms)
assert bigram.shape[1] == len(syms)
plot_mat_pca_ordered(bigram, syms, configs,
fname_tag='', unigram=counts)
# title = "Transition Counts in the %s corpus" % configs['corpus']
# plot_mat(bigram, title, syms, save=True)
def plot_mat(mat, title, syms, y_tick_syms=None, save=False):
mat = np.asarray(mat)
print mat.size, len(syms)
assert mat.size == len(syms) or mat.size == len(syms)**2
from colormaps import inferno
plt.matshow(mat, cmap=inferno)
# plt.title(title)
from pylab import setp
# if mat.shape[1] < 23:
# fontsize = 'medium'
# else:
# fontsize = 'xx-small'
fontsize = 'small'
fontsize = 'xx-small'
if y_tick_syms is None:
y_tick_syms = syms
#print '# of syms:', len(syms)
print ' '.join(syms)
if len(y_tick_syms) == 1:
if y_tick_syms[0] == 'V':
y_tick_str = '$'+y_tick_syms[0]+'_1$'
else:
y_tick_str = '$'+y_tick_syms[0]+'$'
plt.yticks([0], [y_tick_str], fontweight='bold')
setp(plt.gca().get_yticklabels(), fontsize='large')
else:
if len(mat.shape) == 1:
plt.yticks(range(len(y_tick_syms)), y_tick_syms, fontweight='bold')
else:
plt.yticks(range(len(y_tick_syms)), y_tick_syms)
setp(plt.gca().get_yticklabels(), fontsize=fontsize)
# if one-dimensional
if len(mat.shape) == 1:
plt.xticks(range(len(syms)), syms, fontweight='bold')
else:
plt.xticks(range(len(syms)), syms, rotation='vertical')
setp(plt.gca().get_xticklabels(), fontsize=fontsize)
plt.title(title, y=1.1)
# plt.colorbar(shrink=.8)
if len(mat.shape) == 1:
colorbar = plt.colorbar(shrink=.7, ticks=np.arange(-2.0, 2.1, 1.0))
else:
colorbar = plt.colorbar(shrink=.7)
if len(y_tick_syms) == 1:
colorbar.set_label('chord weights in '+y_tick_str, fontsize=9) # labelpad=-40, y=0.45)
colorbar.ax.tick_params(labelsize=9)
# plt.tight_layout()
plt.savefig('not_sorted_mat.pdf')
def plot_mat_pca_ordered(bigram, syms, configs,
fname_tag='', unigram=None):
from sklearn.decomposition import PCA
pca = PCA(n_components=1)
pca.fit(bigram)
transformed_bigram = np.squeeze(pca.transform(bigram))
print transformed_bigram.shape
print 'variance covered: %.2f' % np.sum(pca.explained_variance_ratio_)
plot_mat_sort_with(transformed_bigram, syms, configs,
bigram, unigram,
fname_tag=fname_tag, technique='PCA')
return transformed_bigram
def plot_mat_sort_with(values_to_sort_with, syms, configs,
bigram, unigram=None, fname_tag='', technique='PCA'):
sorted_inds = np.argsort(-values_to_sort_with)
print sorted_inds
sorted_syms = [syms[ind] for ind in sorted_inds]
# sorted_ngram = bigram[sorted_inds]
print bigram.shape
sorted_ngram = [bigram[ind, sorted_inds] for ind in sorted_inds]
sorted_ngram = np.squeeze(np.asarray(sorted_ngram))
print sorted_ngram.shape
plt.clf()
title_str = '%s sorted %s transition count matrix' % (technique, fname_tag)
plot_mat(sorted_ngram, title_str, sorted_syms)
plt.savefig('all-%s-ordered-%s-%s.pdf' %
(technique, fname_tag, configs.name))
plt.savefig('all-%s-ordered-%s-%s.png' %
(technique, fname_tag, configs.name))
# just for saving the transition into text, sorted by unigram..
if unigram is None:
return
output_transition_as_text(unigram, sorted_inds,
sorted_ngram, sorted_syms, syms, fname_tag)
def output_transition_as_text(unigram, sorted_inds,
sorted_ngram, sorted_syms, syms, fname_tag=''):
test_sorted_syms = [syms[i] for i in sorted_inds]
print len(test_sorted_syms), len(sorted_syms)
print test_sorted_syms[5:]
print sorted_syms[5:]
assert test_sorted_syms == sorted_syms
sorted_unigram = [unigram[ind] for ind in sorted_inds]
line = ''
for i in range(len(syms)):
line += '\n%s (%d): ' % (sorted_syms[i], sorted_unigram[i])
local_sorted_inds = np.argsort(sorted_ngram[i, :])[::-1]
local_sorted_syms = [sorted_syms[ind] for ind in local_sorted_inds]
for j in range(len(syms)):
line += '%s (%.2f), ' % (local_sorted_syms[j],
sorted_ngram[i, local_sorted_inds[j]])
fname = 'transitions-%s-bach.txt' % fname_tag
with open(fname, 'w') as p:
p.writelines(line)
def plot_mat_sorted_with_itself(values_to_sort_with, syms,
configs, row_tag, topn=None, save=False,
title_str='', fname_tag=''):
#print 'shape', values_to_sort_with.shape
sorted_inds = np.argsort(values_to_sort_with)
sorted_inds = np.squeeze(sorted_inds)
# syms = data.syms
if topn is None:
topn = len(syms)
sorted_syms = [syms[ind] for ind in sorted_inds[:topn]]
sorted_ngram = np.sort(values_to_sort_with)[::-1][:topn]
if save:
plt.clf()
#print sorted_ngram.shape
if len(sorted_ngram.shape) == 1:
sorted_ngram = sorted_ngram[None, :]
#print sorted_ngram.shape
plot_mat(sorted_ngram, title_str,
sorted_syms, y_tick_syms=[row_tag])
if save:
plt.savefig('trans-%s-%s.pdf' % (fname_tag, configs.name))
def project_3d_to_2d(xyz, ax):
x2, y2, _ = proj3d.proj_transform(xyz[0], xyz[1], xyz[2], ax.get_proj())
xy = np.array([x2, y2])
return xy
# def annotate(syms, vecs, pl_ax, is_3d, color='b', text_size=None):
# print '# of syms:', len(syms)
# print vecs.shape
# for i, sym in enumerate(syms):
# xy = vecs[i, :]
# if is_3d:
# xy = project_3d_to_2d(xy, pl_ax)
# # if DUPLICATE_BY_ROTATE:
# # text_size = 'small'
# # else:
# # text_size = 'xx-small'
# if text_size is None:
# # text_size = 'large'
# text_size = 'xx-small'
# pl_ax.annotate(sym, xy=xy, xytext=(-3, 2),
# textcoords = 'offset points', size=text_size, color=color)
def annotate(syms, vecs, pl_ax, text_size=None, color='b'):
assert len(syms) == vecs.shape[0]
print '# of syms:', len(syms)
print vecs.shape
for i, sym in enumerate(syms):
xy = vecs[i, :]
if text_size is None:
text_size = 'x-small'
text_size = 'small'
if text_size == 'x-large':
fontweight = 'bold'
else:
fontweight = 'normal'
pl_ax.annotate(sym, xy=xy, xytext=(-3, 2),
textcoords = 'offset points', size=text_size, color=color, fontweight=fontweight)
def add_arrow_annotation(syms, vecs, arrow_dict, pl_ax, is_3d=False, color='#ee8d18',
linewidth=3):
if not is_3d:
assert vecs.shape[1] == 2
else:
assert vecs.shape[1] == 3
highlight_syms = arrow_dict.keys()
filtered_vecs, filtered_syms = filter_syms(vecs, syms, include_syms=highlight_syms)
print highlight_syms
print filtered_syms
# assert len(highlight_syms) == len(filtered_syms)
annotate(filtered_syms, filtered_vecs, pl_ax, color=color, text_size='x-large')
for start, end in arrow_dict.iteritems():
if start not in syms or end not in syms:
continue
start_ind = syms.index(start)
end_ind = syms.index(end)
start_pos = vecs[start_ind, :]
if is_3d:
start_pos = project_3d_to_2d(start_pos, pl_ax)
# end_pos = (vecs[end_ind, :] - vecs[start_ind, :])
end_pos = vecs[end_ind, :]
if is_3d:
end_pos = project_3d_to_2d(end_pos, pl_ax)
diff = end_pos - start_pos
# head_length = np.sqrt(np.square(end_pos[0]) + np.square(end_pos[1]))*0.1
pl_ax.arrow(start_pos[0], start_pos[1], diff[0], diff[1],
fc=color, ec=color, head_width=0, head_length=0,
linewidth=linewidth) # head_width=0.05, head_length=head_length,
def add_most_annotations(syms, vecs, ax, is_3d, plot_relative_minor=True):
# annotate(syms, vecs, ax, is_3d)
# add_arrow_annotation(syms, vecs, second_res_dict, ax, is_3d)
add_arrow_annotation(syms, vecs, CIRCLE_OF_FIFTHS_MAJOR_DICT, ax, is_3d, color='g')
add_arrow_annotation(syms, vecs, CIRCLE_OF_FIFTHS_MINOR_DICT, ax, is_3d, color='m')
if plot_relative_minor:
add_arrow_annotation(syms, vecs, RELATIVE_MINOR, ax, is_3d)
def add_secondary_dominants_annotations(syms, vecs, second_res_dict, ax, is_3d):
annotate(syms, vecs, ax, is_3d)
add_arrow_annotation(syms, vecs, second_res_dict, ax, is_3d)
def add_relative_minor_annotations(syms, vecs, ax, is_3d):
annotate(syms, vecs, ax, is_3d)
add_arrow_annotation(syms, vecs, RELATIVE_MINOR, ax, is_3d)
def make_song_dict(song):
song_dict = {}
for i in range(len(song)-1):
song_dict[song[i]] = song[i+1]
return song_dict
def plot_vec(vecs, syms, configs, highlight_syms=[],
with_annotations=False, save=False,
fname_tag=None, return_ax=False,
zero_gram_vec=None, second_res_dict=None, plot3d=False,
subplot=False, doPCA=False):
# TODO: hacky settings
PLOT_SECOND_ONLY = False
PLOT_RELATIVE_MINOR_ONLY = False
PLOT_RELATIVE_MINOR = False
ONLY_SHOW_HIGHLIGH_SYMBOLS = False
original_vecs = vecs
from postprocessing_tools import pca_project
if doPCA:
if plot3d is True:
vecs = pca_project(vecs, n_components=3)
else:
vecs = pca_project(vecs, n_components=2)
else:
if not plot3d:
assert vecs.shape[1] == 2
else:
assert vecs.shape[1] == 3
if zero_gram_vec is None:
dot_sizes = 2 # 5
else:
dot_sizes = np.exp(np.log(zero_gram_vec))/60
highlight_dot_sizes = 25
if vecs.shape[1] < 3:
plot3d = False
if PLOT_SECOND_ONLY:
print 'second_res_dict', len(second_res_dict)
print second_res_dict
if len(highlight_syms) == 0:
from music_theory_tools import CIRCLE_OF_FIFTHS_MAJOR, CIRCLE_OF_FIFTHS_MINOR
highlight_syms.extend(CIRCLE_OF_FIFTHS_MAJOR[::])
highlight_syms.extend(CIRCLE_OF_FIFTHS_MINOR[::])
highlight_vecs = []
highlight_syms_copy = copy(highlight_syms)
for s in highlight_syms_copy:
if s in syms:
ind = syms.index(s)
highlight_vecs.append(vecs[ind, :])
else:
print 'WARNING: %s is not in syms' % s
highlight_syms.remove(s)
highlight_vecs = np.squeeze(highlight_vecs)
if plot3d:
print '3D'
if not subplot:
fig = plt.figure()
# TODO: fig might not be defined for 3D subplot
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.scatter(vecs[:, 0], vecs[:, 1], vecs[:, 2],
s=highlight_dot_sizes, color='b')
if len(highlight_vecs) > 0:
ax.scatter(highlight_vecs[:, 0], highlight_vecs[:, 1],
highlight_vecs[:, 2], s=dot_sizes, color='r')
if not with_annotations:
annotate(syms, vecs, ax, True)
elif PLOT_SECOND_ONLY:
add_secondary_dominants_annotations(syms, vecs, second_res_dict, ax, True)
elif PLOT_RELATIVE_MINOR_ONLY:
add_relative_minor_annotations(syms, vecs, ax, True)
else:
add_most_annotations(syms, vecs, ax, True,
plot_relative_minor=PLOT_RELATIVE_MINOR)
else:
print '2D', np.shape(vecs)
if not subplot:
plt.figure()
if vecs.shape[1] > 1:
if not ONLY_SHOW_HIGHLIGH_SYMBOLS:
plt.scatter(vecs[:, 0], vecs[:, 1], s=dot_sizes, color='b')
if len(highlight_vecs) > 0:
plt.scatter(highlight_vecs[:, 0], highlight_vecs[:, 1],
s=highlight_dot_sizes, color='k')
ax = plt.gca()
if len(highlight_syms) == 0 or not ONLY_SHOW_HIGHLIGH_SYMBOLS:
annotate(syms, vecs, ax, False)
else:
annotate(highlight_syms, highlight_vecs, ax, False)
if not with_annotations:
pass
elif PLOT_SECOND_ONLY:
add_secondary_dominants_annotations(syms, vecs, second_res_dict, ax)
elif PLOT_RELATIVE_MINOR_ONLY:
add_relative_minor_annotations(syms, vecs, ax, False)
else:
add_most_annotations(syms, vecs, ax, False, plot_relative_minor=PLOT_RELATIVE_MINOR)
else:
# just 1D
plt.plot(vecs, 'x', )
# pl_ax.annotate(sym, xy=xy, xytext=(-3, 2), textcoords = 'offset points', size=text_size, color='b')
# title_str = ' '.join(SIMPLE_CHORDS_ORDERED)
# plt.title('chord space')
plt.tick_params(axis='both', which='major', labelsize=6)
fname = configs.name
# fname = get_fname_base(vecs, syms, seqs, vec_dim, window_sz=window_sz)
# fname = get_fname_base(vecs, syms, seqs, min_count=min_count, vec_dim=vec_dim,
# do_pca=doPCA, window_sz=window_sz)
if plot3d:
fname += '-3d'
if PLOT_SECOND_ONLY:
fname += '-second'
if PLOT_RELATIVE_MINOR_ONLY or PLOT_RELATIVE_MINOR:
fname += '-relative_minor'
if fname_tag is not None:
fname += '-%s' % fname_tag
if save:
plt.savefig('%s.pdf' % fname)
if return_ax:
# TODO: ax might not be defined depending on usage
return fname, ax, vecs
else:
return fname
def filter_syms(vecs, syms, exclude_syms=None, include_syms=None):
filtered_vecs = []
filtered_syms = []
if exclude_syms is not None:
for i, sym in enumerate(syms):
if sym not in exclude_syms:
filtered_vecs.append(vecs[i, :])
filtered_syms.append(sym)
else:
filtered_vecs = vecs.copy()
filtered_syms = copy(syms)
filtered_vecs = np.asarray(filtered_vecs)
filtered_again_vecs = []
filtered_again_syms = []
if include_syms is not None:
for i, sym in enumerate(filtered_syms):
if sym in include_syms:
filtered_again_vecs.append(filtered_vecs[i, :])
filtered_again_syms.append(sym)
else:
filtered_again_vecs = filtered_vecs.copy()
filtered_again_syms = copy(filtered_syms)
return np.asarray(filtered_again_vecs), filtered_again_syms
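# Illustrative usage sketch (not part of the original module): to plot only the
# circle-of-fifths major chords from a 2-D projection, something like
#
#     vecs_2d, kept_syms = filter_syms(vecs, syms,
#                                      include_syms=CIRCLE_OF_FIFTHS_MAJOR_DICT.keys())
#
# could be used before calling annotate() / plot_vec().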
if __name__ == '__main__':
# plot_bach_dist()
# plot_rock_nontransposed_dist()
# plot_rock_transposed_dist()
# plot_rock_mat()
plot_bach_mat() | mit |
bzero/statsmodels | statsmodels/tsa/statespace/sarimax.py | 12 | 82127 | """
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter, FilterResults
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
`p` and `q` may either be an integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the process,
        while `P` and `Q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when exogenous regressors, `exog`, are provided, to select
        whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
        which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
k_seasons : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.k_seasons = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], int):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], int):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], int):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
self.polynomial_seasonal_ar[(i + 1) * self.k_seasons] = (
seasonal_order[0][i]
)
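        # For example (not in the original source), seasonal_order=(1, 0, 0, 4)
        # with k_seasons = 4 yields polynomial_seasonal_ar = [1, 0, 0, 0, 1],
        # i.e. a single seasonal AR term at lag s = 4.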
if isinstance(seasonal_order[2], int):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
self.polynomial_seasonal_ma[(i + 1) * self.k_seasons] = (
seasonal_order[2][i]
)
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
        # Note: for a typical ARMA(p,q) model given as integer orders,
        # p = k_ar_params = k_ar and q = k_ma_params = k_ma, although this may
        # not be true for models with arbitrary lag polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
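        # For example (not in the original source), trend='ct' gives
        # polynomial_trend = [1, 1] and hence k_trend = 2.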
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
        # State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.k_seasons * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
        # Diffuse initialization can be more sensitive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
            self.k_seasonal_ar_params + self.k_seasonal_ma_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.k_seasons * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Handle kwargs specified initialization
if self.ssm.initialization is not None:
self._manual_initialization = True
# Initialize the fixed components of the statespace model
self.ssm.design = self.initial_design
self.ssm.state_intercept = self.initial_state_intercept
self.ssm.transition = self.initial_transition
self.ssm.selection = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method (unless initialization was already specified).
if k_diffuse_states == 0 and not self._manual_initialization:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
    # TODO: I think the kwargs are not attached; need to recover them from ???
def _get_init_kwds(self):
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
def prepare_data(self):
endog, exog = super(SARIMAX, self).prepare_data()
# Perform simple differencing if requested
if (self.simple_differencing and
(self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
# Perform simple differencing
endog = diff(endog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
if exog is not None:
exog = diff(exog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
# Reset the ModelData datasets
self.data.endog, self.data.exog = (
self.data._convert_endog_exog(endog, exog))
# Reset the nobs
self.nobs = endog.shape[0]
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(1, self.nobs + 1)
self._trend_data = np.zeros((self.nobs, self.k_trend))
i = 0
for k in self.polynomial_trend.nonzero()[0]:
if k == 0:
self._trend_data[:, i] = np.ones(self.nobs,)
else:
self._trend_data[:, i] = time_trend**k
i += 1
return endog, exog
def initialize(self):
"""
Initialize the SARIMAX model.
Notes
-----
These initialization steps must occur following the parent class
__init__ function calls.
"""
super(SARIMAX, self).initialize()
# Internal flag for whether the default mixed approximate diffuse /
# stationary initialization has been overridden with a user-supplied
# initialization
self._manual_initialization = False
# Cache the indexes of included polynomial orders (for update below)
# (but we do not want the index of the constant term, so exclude the
# first index)
self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
self._polynomial_seasonal_ar_idx = np.nonzero(
self.polynomial_seasonal_ar
)[0][1:]
self._polynomial_seasonal_ma_idx = np.nonzero(
self.polynomial_seasonal_ma
)[0][1:]
# Save the indices corresponding to the reduced form lag polynomial
# parameters in the transition and selection matrices so that they
# don't have to be recalculated for each update()
start_row = self._k_states_diff
end_row = start_row + self.k_ar + self.k_seasonal_ar
col = self._k_states_diff
if not self.hamilton_representation:
self.transition_ar_params_idx = (
np.s_['transition', start_row:end_row, col]
)
else:
self.transition_ar_params_idx = (
np.s_['transition', col, start_row:end_row]
)
start_row += 1
end_row = start_row + self.k_ma + self.k_seasonal_ma
col = 0
if not self.hamilton_representation:
self.selection_ma_params_idx = (
np.s_['selection', start_row:end_row, col]
)
else:
self.design_ma_params_idx = (
np.s_['design', col, start_row:end_row]
)
# Cache indices for exog variances in the state covariance matrix
if self.state_regression and self.time_varying_regression:
idx = np.diag_indices(self.k_posdef)
self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
idx[1][-self.k_exog:])
def initialize_known(self, initial_state, initial_state_cov):
self._manual_initialization = True
self.ssm.initialize_known(initial_state, initial_state_cov)
initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__
def initialize_approximate_diffuse(self, variance=None):
self._manual_initialization = True
self.ssm.initialize_approximate_diffuse(variance)
initialize_approximate_diffuse.__doc__ = (
KalmanFilter.initialize_approximate_diffuse.__doc__
)
def initialize_stationary(self):
self._manual_initialization = True
self.ssm.initialize_stationary()
initialize_stationary.__doc__ = (
KalmanFilter.initialize_stationary.__doc__
)
def initialize_state(self, variance=None):
"""
Initialize state and state covariance arrays in preparation for the
Kalman filter.
Parameters
----------
variance : float, optional
The variance for approximating diffuse initial conditions. Default
can be found in the Representation class documentation.
Notes
-----
Initializes the ARMA component of the state space to the typical
stationary values and the other components as approximate diffuse.
        Can be overridden by calling one of the other initialization methods
before fitting the model.
"""
# Check if a manual initialization has already been specified
if self._manual_initialization:
return
# If we're not enforcing stationarity, then we can't initialize a
# stationary component
if not self.enforce_stationarity:
self.initialize_approximate_diffuse(variance)
return
        # Otherwise, create the initial state and state covariance matrix
        # from a combination of diffuse and stationary components
# Create initialized non-stationary components
if variance is None:
variance = self.ssm.initial_variance
dtype = self.ssm.transition.dtype
initial_state = np.zeros(self.k_states, dtype=dtype)
initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance
# Get the offsets (from the bottom or bottom right of the vector /
# matrix) for the stationary component.
if self.state_regression:
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
start = -self._k_order
end = None
# Add in the initialized stationary components
if self._k_order > 0:
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
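    # Minimal sketch of the stationary initialization above (hypothetical
    # values): for a univariate AR(1) with coefficient phi and innovation
    # variance sigma2, the discrete Lyapunov equation P = phi * P * phi + sigma2
    # has solution P = sigma2 / (1 - phi**2), e.g.
    #
    #     import numpy as np
    #     from scipy.linalg import solve_discrete_lyapunov
    #     phi, sigma2 = 0.5, 1.0
    #     P = solve_discrete_lyapunov(np.array([[phi]]), np.array([[sigma2]]))
    #     # P[0, 0] == sigma2 / (1 - phi**2) == 4 / 3
    #
    # `initialize_state` applies the same identity to the companion-form
    # transition matrix and the selected state covariance block.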
@property
def initial_design(self):
"""Initial design matrix"""
# Basic design matrix
design = np.r_[
[1] * self._k_diff,
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff,
[1] * self.state_error, [0] * (self._k_order - 1)
]
# If we have exogenous regressors included as part of the state vector
# then the exogenous data is incorporated as a time-varying component
# of the design matrix
if self.state_regression:
if self._k_order > 0:
design = np.c_[
np.reshape(
np.repeat(design, self.nobs),
(design.shape[0], self.nobs)
).T,
self.exog
].T[None, :, :]
else:
design = self.exog.T[None, :, :]
return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
@property
def initial_transition(self):
"""Initial transition matrix"""
transition = np.zeros((self.k_states, self.k_states))
# Exogenous regressors component
if self.state_regression:
start = -self.k_exog
# T_\beta
transition[start:, start:] = np.eye(self.k_exog)
# Autoregressive component
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
# Autoregressive component
start = -self._k_order
end = None
# T_c
transition[start:end, start:end] = companion_matrix(self._k_order)
if self.hamilton_representation:
transition[start:end, start:end] = np.transpose(
companion_matrix(self._k_order)
)
# Seasonal differencing component
# T^*
if self._k_seasonal_diff > 0:
seasonal_companion = companion_matrix(self.k_seasons).T
seasonal_companion[0, -1] = 1
for d in range(self._k_seasonal_diff):
start = self._k_diff + d * self.k_seasons
end = self._k_diff + (d + 1) * self.k_seasons
# T_c^*
transition[start:end, start:end] = seasonal_companion
# i
for i in range(d + 1, self._k_seasonal_diff):
transition[start, end + self.k_seasons - 1] = 1
# \iota
transition[start, self._k_states_diff] = 1
# Differencing component
if self._k_diff > 0:
idx = np.triu_indices(self._k_diff)
# T^**
transition[idx] = 1
# [0 1]
if self.k_seasons > 0:
start = self._k_diff
end = self._k_states_diff
transition[:self._k_diff, start:end] = (
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff
)
# [1 0]
column = self._k_states_diff
transition[:self._k_diff, column] = 1
return transition
@property
def initial_selection(self):
"""Initial selection matrix"""
if not (self.state_regression and self.time_varying_regression):
if self.k_posdef > 0:
selection = np.r_[
[0] * (self._k_states_diff),
[1] * (self._k_order > 0), [0] * (self._k_order - 1),
[0] * ((1 - self.mle_regression) * self.k_exog)
][:, None]
else:
selection = np.zeros((self.k_states, 0))
else:
selection = np.zeros((self.k_states, self.k_posdef))
# Typical state variance
if self._k_order > 0:
selection[0, 0] = 1
# Time-varying regression coefficient variances
for i in range(self.k_exog, 0, -1):
selection[-i, -i] = 1
return selection
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params, ndmin=1)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).filter(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
@staticmethod
def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
polynomial_ma, k_trend=0, trend_data=None):
k = 2 * k_ma
r = max(k + k_ma, k_ar)
k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1
residuals = None
if k_ar + k_ma + k_trend > 0:
# If we have MA terms, get residuals from an AR(k) model to use
# as data for conditional sum of squares estimates of the MA
# parameters
if k_ma > 0:
Y = endog[k:]
X = lagmat(endog, k, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params_ar)
# Run an ARMA(p,q) model using the just computed residuals as data
Y = endog[r:]
X = np.empty((Y.shape[0], 0))
if k_trend > 0:
if trend_data is None:
raise ValueError('Trend data must be provided if'
' `k_trend` > 0.')
X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
if k_ar > 0:
cols = polynomial_ar.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
if k_ma > 0:
cols = polynomial_ma.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]
# Get the array of [ar_params, ma_params]
params = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params)
# Default output
params_trend = []
params_ar = []
params_ma = []
params_variance = []
# Get the params
offset = 0
if k_trend > 0:
params_trend = params[offset:k_trend + offset]
offset += k_trend
if k_ar > 0:
params_ar = params[offset:k_params_ar + offset]
offset += k_params_ar
if k_ma > 0:
params_ma = params[offset:k_params_ma + offset]
offset += k_params_ma
if residuals is not None:
params_variance = (residuals[k_params_ma:]**2).mean()
return (params_trend, params_ar, params_ma,
params_variance)
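    # Sketch of the conditional-sum-of-squares idea used above (hypothetical
    # pure AR(1) case): the starting value for the AR coefficient is simply the
    # least-squares slope of y_t on y_{t-1}, computed with a pseudo-inverse:
    #
    #     from statsmodels.tsa.tsatools import lagmat
    #     X = lagmat(endog, 1, trim='both')   # column of y_{t-1}
    #     Y = endog[1:]
    #     phi_start = np.linalg.pinv(X).dot(Y)
    #
    # MA starting values are obtained the same way after replacing the data
    # with residuals from a long AR regression, as in the k_ma > 0 branch.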
@property
def start_params(self):
"""
Starting parameters for maximum likelihood estimation
"""
# Perform differencing if necessary (i.e. if simple differencing is
# false so that the state-space model will use the entire dataset)
trend_data = self._trend_data
if not self.simple_differencing and (
self._k_diff > 0 or self._k_seasonal_diff > 0):
endog = diff(self.endog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
if self.exog is not None:
exog = diff(self.exog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
else:
exog = None
trend_data = trend_data[:endog.shape[0], :]
else:
endog = self.endog.copy()
exog = self.exog.copy() if self.exog is not None else None
endog = endog.squeeze()
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            mask = ~np.isnan(endog)
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]
            if trend_data is not None:
                trend_data = trend_data[mask]
# Regression effects via OLS
params_exog = []
if self.k_exog > 0:
params_exog = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, params_exog)
if self.state_regression:
params_exog = []
# Non-seasonal ARMA component and trend
(params_trend, params_ar, params_ma,
params_variance) = self._conditional_sum_squares(
endog, self.k_ar, self.polynomial_ar, self.k_ma,
self.polynomial_ma, self.k_trend, trend_data
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_ar = (
self.k_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_ar])
)
if invalid_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_ma = (
self.k_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_ma])
)
if invalid_ma:
raise ValueError('non-invertible starting MA parameters found'
' with `enforce_invertibility` set to True.')
# Seasonal Parameters
_, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
self._conditional_sum_squares(
endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
self.k_seasonal_ma, self.polynomial_seasonal_ma
)
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_seasonal_ar = (
self.k_seasonal_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_seasonal_ar])
)
if invalid_seasonal_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_seasonal_ma = (
self.k_seasonal_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_seasonal_ma])
)
if invalid_seasonal_ma:
raise ValueError('non-invertible starting seasonal moving average'
' parameters found with `enforce_invertibility`'
' set to True.')
# Variances
params_exog_variance = []
if self.state_regression and self.time_varying_regression:
# TODO how to set the initial variance parameters?
params_exog_variance = [1] * self.k_exog
if self.state_error and params_variance == []:
if not params_seasonal_variance == []:
params_variance = params_seasonal_variance
elif self.k_exog > 0:
params_variance = np.dot(endog, endog)
else:
params_variance = 1
params_measurement_variance = 1 if self.measurement_error else []
# Combine all parameters
return np.r_[
params_trend,
params_exog,
params_ar,
params_ma,
params_seasonal_ar,
params_seasonal_ma,
params_exog_variance,
params_measurement_variance,
params_variance
]
@property
def endog_names(self, latex=False):
"""Names of endogenous variables"""
diff = ''
if self.k_diff > 0:
if self.k_diff == 1:
diff = '\Delta' if latex else 'D'
else:
diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff
seasonal_diff = ''
if self.k_seasonal_diff > 0:
if self.k_seasonal_diff == 1:
seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
(self.k_seasons))
else:
seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
(self.k_seasonal_diff, self.k_seasons))
endog_diff = self.simple_differencing
if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
return (('%s%s %s' if latex else '%s.%s.%s') %
(diff, seasonal_diff, self.data.ynames))
elif endog_diff and self.k_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(diff, self.data.ynames))
elif endog_diff and self.k_seasonal_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(seasonal_diff, self.data.ynames))
else:
return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
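    # Example of the resulting names (hypothetical specification): for
    # order=(1, 1, 1), seasonal_order=(1, 0, 0, 12) and trend='c', the
    # templates in `_get_model_names` yield
    #     ['intercept', 'ar.L1', 'ma.L1', 'ar.S.L12', 'sigma2']
    # in the same parameter order used by `start_params`, `transform_params`
    # and `update`.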
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
@property
def model_names(self):
"""
The plain text names of all possible model parameters.
"""
return self._get_model_names(latex=False)
@property
def model_latex_names(self):
"""
The latex names of all possible model parameters.
"""
return self._get_model_names(latex=True)
def _get_model_names(self, latex=False):
names = {
'trend': None,
'exog': None,
'ar': None,
'ma': None,
'seasonal_ar': None,
'seasonal_ma': None,
'reduced_ar': None,
'reduced_ma': None,
'exog_variance': None,
'measurement_variance': None,
'variance': None,
}
# Trend
if self.k_trend > 0:
trend_template = 't_%d' if latex else 'trend.%d'
names['trend'] = []
for i in self.polynomial_trend.nonzero()[0]:
if i == 0:
names['trend'].append('intercept')
elif i == 1:
names['trend'].append('drift')
else:
names['trend'].append(trend_template % i)
# Exogenous coefficients
if self.k_exog > 0:
names['exog'] = self.exog_names
# Autoregressive
if self.k_ar > 0:
ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
names['ar'] = []
for i in self.polynomial_ar.nonzero()[0][1:]:
names['ar'].append(ar_template % i)
# Moving Average
if self.k_ma > 0:
ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
names['ma'] = []
for i in self.polynomial_ma.nonzero()[0][1:]:
names['ma'].append(ma_template % i)
# Seasonal Autoregressive
if self.k_seasonal_ar > 0:
seasonal_ar_template = (
'$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
)
names['seasonal_ar'] = []
for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
names['seasonal_ar'].append(seasonal_ar_template % i)
# Seasonal Moving Average
if self.k_seasonal_ma > 0:
seasonal_ma_template = (
'$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
)
names['seasonal_ma'] = []
for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
names['seasonal_ma'].append(seasonal_ma_template % i)
# Reduced Form Autoregressive
if self.k_ar > 0 or self.k_seasonal_ar > 0:
            reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
names['reduced_ar'] = []
for i in reduced_polynomial_ar.nonzero()[0][1:]:
names['reduced_ar'].append(ar_template % i)
# Reduced Form Moving Average
if self.k_ma > 0 or self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
names['reduced_ma'] = []
for i in reduced_polynomial_ma.nonzero()[0][1:]:
names['reduced_ma'].append(ma_template % i)
# Exogenous variances
if self.state_regression and self.time_varying_regression:
exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
names['exog_variance'] = [
exog_var_template % exog_name for exog_name in self.exog_names
]
# Measurement error variance
if self.measurement_error:
meas_var_tpl = (
'$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
)
names['measurement_variance'] = [meas_var_tpl]
# State variance
if self.state_error:
var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
names['variance'] = [var_tpl]
return names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Used primarily to enforce stationarity of the autoregressive lag
polynomial, invertibility of the moving average lag polynomial, and
positive variance parameters.
Parameters
----------
unconstrained : array_like
Unconstrained parameters used by the optimizer.
Returns
-------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
unconstrained = np.array(unconstrained, ndmin=1)
constrained = np.zeros(unconstrained.shape, unconstrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
constrained[start:end] = unconstrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ma_params
# Transform the standard deviation parameters to be positive
if self.state_regression and self.time_varying_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]**2
start += self.k_exog
if self.measurement_error:
constrained[start] = unconstrained[start]**2
start += 1
end += 1
if self.state_error:
constrained[start] = unconstrained[start]**2
# start += 1
# end += 1
return constrained
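    # Round-trip sketch of the constraints applied above (hypothetical values):
    # `constrain_stationary_univariate` maps any real vector into the
    # stationary region and `untransform_params` inverts it, while variance
    # parameters are squared / square-rooted:
    #
    #     from statsmodels.tsa.statespace.tools import (
    #         constrain_stationary_univariate,
    #         unconstrain_stationary_univariate)
    #     u = np.array([2.0])                          # unconstrained AR term
    #     c = constrain_stationary_univariate(u)       # inside (-1, 1)
    #     np.allclose(unconstrain_stationary_univariate(c), u)  # True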
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Used primarily to reverse enforcement of stationarity of the
autoregressive lag polynomial and invertibility of the moving average
lag polynomial.
Parameters
----------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Returns
-------
        unconstrained : array_like
Unconstrained parameters used by the optimizer.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
constrained = np.array(constrained, ndmin=1)
unconstrained = np.zeros(constrained.shape, constrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
unconstrained[start:end] = constrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ma_params
# Untransform the standard deviation
if self.state_regression and self.time_varying_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]**0.5
start += self.k_exog
if self.measurement_error:
unconstrained[start] = constrained[start]**0.5
start += 1
end += 1
if self.state_error:
unconstrained[start] = constrained[start]**0.5
# start += 1
# end += 1
return unconstrained
def update(self, params, transformed=True):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
params = super(SARIMAX, self).update(params, transformed)
params_trend = None
params_exog = None
params_ar = None
params_ma = None
params_seasonal_ar = None
params_seasonal_ma = None
params_exog_variance = None
params_measurement_variance = None
params_variance = None
# Extract the parameters
start = end = 0
end += self.k_trend
params_trend = params[start:end]
start += self.k_trend
if self.mle_regression:
end += self.k_exog
params_exog = params[start:end]
start += self.k_exog
end += self.k_ar_params
params_ar = params[start:end]
start += self.k_ar_params
end += self.k_ma_params
params_ma = params[start:end]
start += self.k_ma_params
end += self.k_seasonal_ar_params
params_seasonal_ar = params[start:end]
start += self.k_seasonal_ar_params
end += self.k_seasonal_ma_params
params_seasonal_ma = params[start:end]
start += self.k_seasonal_ma_params
if self.state_regression and self.time_varying_regression:
end += self.k_exog
params_exog_variance = params[start:end]
start += self.k_exog
if self.measurement_error:
params_measurement_variance = params[start]
start += 1
end += 1
if self.state_error:
params_variance = params[start]
# start += 1
# end += 1
# Update lag polynomials
if self.k_ar > 0:
if self.polynomial_ar.dtype == params.dtype:
self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
else:
polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
polynomial_ar[self._polynomial_ar_idx] = -params_ar
self.polynomial_ar = polynomial_ar
if self.k_ma > 0:
if self.polynomial_ma.dtype == params.dtype:
self.polynomial_ma[self._polynomial_ma_idx] = params_ma
else:
polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
polynomial_ma[self._polynomial_ma_idx] = params_ma
self.polynomial_ma = polynomial_ma
if self.k_seasonal_ar > 0:
idx = self._polynomial_seasonal_ar_idx
if self.polynomial_seasonal_ar.dtype == params.dtype:
self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
else:
polynomial_seasonal_ar = (
self.polynomial_seasonal_ar.real.astype(params.dtype)
)
polynomial_seasonal_ar[idx] = -params_seasonal_ar
self.polynomial_seasonal_ar = polynomial_seasonal_ar
if self.k_seasonal_ma > 0:
idx = self._polynomial_seasonal_ma_idx
if self.polynomial_seasonal_ma.dtype == params.dtype:
self.polynomial_seasonal_ma[idx] = params_seasonal_ma
else:
polynomial_seasonal_ma = (
self.polynomial_seasonal_ma.real.astype(params.dtype)
)
polynomial_seasonal_ma[idx] = params_seasonal_ma
self.polynomial_seasonal_ma = polynomial_seasonal_ma
# Get the reduced form lag polynomial terms by multiplying the regular
# and seasonal lag polynomials
        # Note: although the numpy np.polymul examples assume the polynomials
        # are ordered from highest degree to lowest, whereas ours are ordered
        # from lowest to highest, it does not matter here.
if self.k_seasonal_ar > 0:
reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
else:
reduced_polynomial_ar = -self.polynomial_ar
if self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
else:
reduced_polynomial_ma = self.polynomial_ma
# Observation intercept
# Exogenous data with MLE estimation of parameters enters through a
        # time-varying observation intercept (this is equivalent to simply
        # subtracting it out of the endogenous variable first)
if self.mle_regression:
self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]
# State intercept (Harvey) or additional observation intercept
# (Hamilton)
        # SARIMA trend enters through a time-varying state intercept,
# associated with the first row of the stationary component of the
# state vector (i.e. the first element of the state vector following
# any differencing elements)
if self.k_trend > 0:
data = np.dot(self._trend_data, params_trend).astype(params.dtype)
if not self.hamilton_representation:
self.ssm['state_intercept', self._k_states_diff, :] = data
else:
# The way the trend enters in the Hamilton representation means
# that the parameter is not an ``intercept'' but instead the
# mean of the process. The trend values in `data` are meant for
# an intercept, and so must be transformed to represent the
# mean instead
if self.hamilton_representation:
data /= np.sum(-reduced_polynomial_ar)
# If we already set the observation intercept for MLE
# regression, just add to it
if self.mle_regression:
self.ssm.obs_intercept += data[None, :]
# Otherwise set it directly
else:
self.ssm.obs_intercept = data[None, :]
# Observation covariance matrix
if self.measurement_error:
self.ssm['obs_cov', 0, 0] = params_measurement_variance
# Transition matrix
if self.k_ar > 0 or self.k_seasonal_ar > 0:
self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
elif not self.ssm.transition.dtype == params.dtype:
# This is required if the transition matrix is not really in use
            # (e.g. for an MA(q) process) so that its dtype never changes as
# the parameters' dtype changes. This changes the dtype manually.
self.ssm.transition = self.ssm.transition.real.astype(params.dtype)
# Selection matrix (Harvey) or Design matrix (Hamilton)
if self.k_ma > 0 or self.k_seasonal_ma > 0:
if not self.hamilton_representation:
self.ssm[self.selection_ma_params_idx] = (
reduced_polynomial_ma[1:]
)
else:
self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]
# State covariance matrix
if self.k_posdef > 0:
self.ssm['state_cov', 0, 0] = params_variance
if self.state_regression and self.time_varying_regression:
self.ssm[self._exog_variance_idx] = params_exog_variance
# Initialize
if not self._manual_initialization:
self.initialize_state()
return params
class SARIMAXResults(MLEResults):
"""
Class to hold results from fitting an SARIMAX model.
Parameters
----------
model : SARIMAX instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the SARIMAX model instance.
polynomial_ar : array
Array containing autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients, ordered from lowest
degree to highest. Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
model_orders : list of int
The orders of each of the polynomials in the model.
param_terms : list of str
List of parameters actually included in the model, in sorted order.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg', **kwargs):
super(SARIMAXResults, self).__init__(model, params, filter_results,
cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
self.specification = Bunch(**{
# Set additional model parameters
'k_seasons': self.model.k_seasons,
'measurement_error': self.model.measurement_error,
'time_varying_regression': self.model.time_varying_regression,
'mle_regression': self.model.mle_regression,
'simple_differencing': self.model.simple_differencing,
'enforce_stationarity': self.model.enforce_stationarity,
'enforce_invertibility': self.model.enforce_invertibility,
'hamilton_representation': self.model.hamilton_representation,
'order': self.model.order,
'seasonal_order': self.model.seasonal_order,
# Model order
'k_diff': self.model.k_diff,
'k_seasonal_diff': self.model.k_seasonal_diff,
'k_ar': self.model.k_ar,
'k_ma': self.model.k_ma,
'k_seasonal_ar': self.model.k_seasonal_ar,
'k_seasonal_ma': self.model.k_seasonal_ma,
# Param Numbers
'k_ar_params': self.model.k_ar_params,
'k_ma_params': self.model.k_ma_params,
# Trend / Regression
'trend': self.model.trend,
'k_trend': self.model.k_trend,
'k_exog': self.model.k_exog,
'mle_regression': self.model.mle_regression,
'state_regression': self.model.state_regression,
})
# Polynomials
self.polynomial_trend = self.model.polynomial_trend
self.polynomial_ar = self.model.polynomial_ar
self.polynomial_ma = self.model.polynomial_ma
self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
self.polynomial_reduced_ar = np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
self.polynomial_reduced_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
# Distinguish parameters
self.model_orders = self.model.model_orders
self.param_terms = self.model.param_terms
start = end = 0
for name in self.param_terms:
end += self.model_orders[name]
setattr(self, '_params_%s' % name, self.params[start:end])
start += self.model_orders[name]
@cache_readonly
def arroots(self):
"""
(array) Roots of the reduced form autoregressive lag polynomial
"""
return np.roots(self.polynomial_reduced_ar)**-1
@cache_readonly
def maroots(self):
"""
(array) Roots of the reduced form moving average lag polynomial
"""
return np.roots(self.polynomial_reduced_ma)**-1
@cache_readonly
def arfreq(self):
"""
(array) Frequency of the roots of the reduced form autoregressive
lag polynomial
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def mafreq(self):
"""
(array) Frequency of the roots of the reduced form moving average
lag polynomial
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def arparams(self):
"""
(array) Autoregressive parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ar
@cache_readonly
def maparams(self):
"""
(array) Moving average parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ma
def predict(self, start=None, end=None, exog=None, dynamic=False,
**kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, i.e.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : boolean, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
start = 0
# Handle end (e.g. date)
_start = self.model._get_predict_start(start)
_end, _out_of_sample = self.model._get_predict_end(end)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError('Out-of-sample forecasting in a model'
' with a regression component requires'
' additional exogenous values via the'
' `exog` argument.')
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
if not exog.shape == required_exog_shape:
raise ValueError('Provided exogenous values are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_exog_shape),
str(exog.shape)))
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
# TODO replace with init_kwds or specification or similar
model = SARIMAX(
endog,
exog=exog,
order=self.model.order,
seasonal_order=self.model.seasonal_order,
trend=self.model.trend,
measurement_error=self.model.measurement_error,
time_varying_regression=self.model.time_varying_regression,
mle_regression=self.model.mle_regression,
simple_differencing=self.model.simple_differencing,
enforce_stationarity=self.model.enforce_stationarity,
enforce_invertibility=self.model.enforce_invertibility,
hamilton_representation=self.model.hamilton_representation
)
model.update(self.params)
            # Set the kwargs with the updated time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == 'obs':
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
if len(mat.shape) == 2:
kwargs[name] = mat[:, -_out_of_sample:]
else:
kwargs[name] = mat[:, :, -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn('Exogenous array provided to predict, but additional data not'
' required. `exog` argument ignored.')
return super(SARIMAXResults, self).predict(
start=start, end=end, exog=exog, dynamic=dynamic, **kwargs
)
def forecast(self, steps=1, exog=None, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, optional
The number of out of sample forecasts from the end of the
sample. Default is 1.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables for
each step forecasted.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
return super(SARIMAXResults, self).forecast(steps, exog=exog, **kwargs)
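    # Usage sketch (hypothetical data `y`): after fitting, out-of-sample
    # forecasts follow directly from the filtered state, e.g.
    #
    #     res = SARIMAX(y, order=(1, 1, 1)).fit()
    #     fcast = res.forecast(steps=5)
    #
    # If the model was built with exogenous regressors, `exog` must also be
    # supplied with shape (steps, k_exog), as enforced in `predict` above.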
def summary(self, alpha=.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ''
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ''
has_seasonal = (
self.model.k_seasonal_ar +
self.model.k_seasonal_diff +
self.model.k_seasonal_ma
) > 0
if has_seasonal:
            if self.model.k_seasonal_ar == self.model.k_seasonal_ar_params:
order_seasonal_ar = (
int(self.model.k_seasonal_ar / self.model.k_seasons)
)
else:
order_seasonal_ar = (
tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
)
            if self.model.k_seasonal_ma == self.model.k_seasonal_ma_params:
order_seasonal_ma = (
int(self.model.k_seasonal_ma / self.model.k_seasons)
)
else:
order_seasonal_ma = (
tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = ('(%s, %d, %s, %d)' %
(str(order_seasonal_ar), k_seasonal_diff,
str(order_seasonal_ma), self.model.k_seasons))
if not order == '':
order += 'x'
model_name = (
'%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)
)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class SARIMAXResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)
| bsd-3-clause |
allenai/document-qa | docqa/doc_qa_models.py | 1 | 13498 | from typing import List, Optional, Dict, Union, Set
import tensorflow as tf
from tensorflow import Tensor
from docqa.data_processing.qa_training_data import ParagraphAndQuestionDataset, ParagraphAndQuestionSpec
from docqa.encoder import DocumentAndQuestionEncoder
from docqa.model import Model, Prediction
from docqa.nn.embedder import WordEmbedder, CharWordEmbedder
from docqa.nn.layers import SequenceMapper, SequenceBiMapper, AttentionMapper, SequenceEncoder, \
SequenceMapperWithContext, MapMulti, SequencePredictionLayer, AttentionPredictionLayer
from docqa.text_preprocessor import TextPreprocessor
from docqa.utils import ResourceLoader
class ParagraphQuestionModel(Model):
"""
    Base class for models that take paragraphs/questions as input and handles embedding the
    text in a modular way.
    It's a bit of a hack, but at the moment we leave it up to the client to be aware of and use the `preprocessor`
    (if not None) before passing input to `encode`. This is done in particular so that the preprocessing can be
    done only once, before we sort/batch the input data.
"""
def __init__(self,
encoder: DocumentAndQuestionEncoder,
word_embed: Optional[WordEmbedder],
char_embed: Optional[CharWordEmbedder] = None,
word_embed_layer: Optional[MapMulti] = None,
preprocessor: Optional[TextPreprocessor] = None):
if word_embed is None and char_embed is None:
raise ValueError()
self.preprocessor = preprocessor
self.word_embed = word_embed
self.char_embed = char_embed
self.word_embed_layer = word_embed_layer
self.encoder = encoder
self._is_train_placeholder = None
def init(self, corpus, loader: ResourceLoader):
if self.word_embed is not None:
self.word_embed.set_vocab(corpus, loader,
None if self.preprocessor is None else self.preprocessor.special_tokens())
if self.char_embed is not None:
self.char_embed.embeder.set_vocab(corpus)
def set_inputs(self, datasets: List[ParagraphAndQuestionDataset], word_vec_loader=None):
voc = set()
for dataset in datasets:
voc.update(dataset.get_vocab())
input_spec = datasets[0].get_spec()
for dataset in datasets[1:]:
input_spec += dataset.get_spec()
return self.set_input_spec(input_spec, voc, word_vec_loader)
def set_input_spec(self, input_spec: ParagraphAndQuestionSpec, voc: Set[str],
word_vec_loader: ResourceLoader=None):
if word_vec_loader is None:
word_vec_loader = ResourceLoader()
if self.word_embed is not None:
self.word_embed.init(word_vec_loader, voc)
if self.char_embed is not None:
self.char_embed.embeder.init(word_vec_loader, voc)
self.encoder.init(input_spec, True, self.word_embed,
None if self.char_embed is None else self.char_embed.embeder)
self._is_train_placeholder = tf.placeholder(tf.bool, ())
return self.encoder.get_placeholders()
def get_placeholders(self):
return self.encoder.get_placeholders() + [self._is_train_placeholder]
def get_predictions_for(self, input_tensors: Dict[Tensor, Tensor]):
is_train = input_tensors[self._is_train_placeholder]
enc = self.encoder
q_mask = input_tensors[enc.question_len]
c_mask = input_tensors[enc.context_len]
q_embed = []
c_embed = []
if enc.question_chars in input_tensors:
with tf.variable_scope("char-embed"):
q, c = self.char_embed.embed(is_train,
(input_tensors[enc.question_chars], input_tensors[enc.question_word_len]),
(input_tensors[enc.context_chars], input_tensors[enc.context_word_len]))
q_embed.append(q)
c_embed.append(c)
if enc.question_words in input_tensors:
with tf.variable_scope("word-embed"):
q, c = self.word_embed.embed(is_train,
(input_tensors[enc.question_words], q_mask),
(input_tensors[enc.context_words], c_mask))
if self.word_embed_layer is not None:
with tf.variable_scope("embed-map"):
q, c = self.word_embed_layer.apply(is_train,
(q, q_mask),
(c, c_mask))
q_embed.append(q)
c_embed.append(c)
if enc.question_features in input_tensors:
q_embed.append(input_tensors.get(enc.question_features))
c_embed.append(input_tensors.get(enc.context_features))
q_embed = tf.concat(q_embed, axis=2)
c_embed = tf.concat(c_embed, axis=2)
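        # Shape note (hypothetical sizes): each embedded piece is
        # (batch, sequence, features), so with a 300-d word embedding and a
        # 50-d char embedding the axis=2 concatenation gives a 350-d feature
        # dimension for both q_embed and c_embed (plus any feature columns).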
answer = [input_tensors[x] for x in enc.answer_encoder.get_placeholders()]
return self._get_predictions_for(is_train, q_embed, q_mask, c_embed, c_mask, answer)
def _get_predictions_for(self,
is_train,
question_embed, question_mask,
context_embed, context_mask,
answer) -> Prediction:
        raise NotImplementedError()
def encode(self, batch: List, is_train: bool):
data = self.encoder.encode(batch, is_train)
data[self._is_train_placeholder] = is_train
return data
def __getstate__(self):
state = super().__getstate__()
state["_is_train_placeholder"] = None
return state
def __setstate__(self, state):
if "state" in state:
if "preprocessor" not in state["state"]:
state["state"]["preprocessor"] = None
super().__setstate__(state)
class ContextOnly(ParagraphQuestionModel):
def __init__(self, encoder: DocumentAndQuestionEncoder,
word_embed: Optional[WordEmbedder],
char_embed: Optional[CharWordEmbedder],
context_encoder: SequenceMapper,
prediction: SequencePredictionLayer):
super().__init__(encoder, word_embed, char_embed)
self.context_encoder = context_encoder
self.prediction = prediction
def _get_predictions_for(self, is_train,
question_embed, question_mask,
context_embed, context_mask,
answer) -> Prediction:
with tf.variable_scope("encode"):
self.context_encoder.apply(is_train, context_embed, context_mask)
with tf.variable_scope("predict"):
return self.prediction.apply(is_train, context_embed, answer, context_mask)
class Attention(ParagraphQuestionModel):
"""
Model that encodes the question and context, then applies an attention mechanism
between the two to produce a query-aware context representation, which is used to make a prediction.
"""
def __init__(self, encoder: DocumentAndQuestionEncoder,
preprocess: Optional[TextPreprocessor],
word_embed: Optional[WordEmbedder],
word_embed_layer: Optional[MapMulti],
char_embed: Optional[CharWordEmbedder],
embed_mapper: Optional[SequenceMapper],
question_mapper: Optional[SequenceMapper],
context_mapper: Optional[SequenceMapper],
memory_builder: SequenceBiMapper,
attention: AttentionMapper,
match_encoder: SequenceMapper,
predictor: Union[SequencePredictionLayer, AttentionPredictionLayer]):
super().__init__(encoder, word_embed, char_embed, word_embed_layer, preprocess)
self.embed_mapper = embed_mapper
self.question_mapper = question_mapper
self.context_mapper = context_mapper
self.memory_builder = memory_builder
self.attention = attention
self.match_encoder = match_encoder
self.predictor = predictor
def _get_predictions_for(self, is_train,
question_rep, question_mask,
context_rep, context_mask,
answer) -> Prediction:
if self.embed_mapper is not None:
with tf.variable_scope("map_embed"):
context_rep = self.embed_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("map_embed", reuse=True):
question_rep = self.embed_mapper.apply(is_train, question_rep, question_mask)
if self.question_mapper is not None:
with tf.variable_scope("map_question"):
question_rep = self.question_mapper.apply(is_train, question_rep, question_mask)
if self.context_mapper is not None:
with tf.variable_scope("map_context"):
context_rep = self.context_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("buid_memories"):
keys, memories = self.memory_builder.apply(is_train, question_rep, question_mask)
with tf.variable_scope("apply_attention"):
context_rep = self.attention.apply(is_train, context_rep, keys, memories, context_mask, question_mask)
if self.match_encoder is not None:
with tf.variable_scope("process_attention"):
context_rep = self.match_encoder.apply(is_train, context_rep, context_mask)
with tf.variable_scope("predict"):
if isinstance(self.predictor, AttentionPredictionLayer):
return self.predictor.apply(is_train, context_rep, question_rep, answer, context_mask, question_mask)
else:
return self.predictor.apply(is_train, context_rep, answer, context_mask)
class AttentionAndEncode(ParagraphQuestionModel):
def __init__(self, encoder: DocumentAndQuestionEncoder,
word_embed: Optional[WordEmbedder],
word_embed_layer: Optional[MapMulti],
char_embed: Optional[CharWordEmbedder],
embed_mapper: Optional[SequenceMapper],
question_mapper: Optional[SequenceMapper],
question_encoder: SequenceEncoder,
context_mapper: Optional[SequenceMapper],
memory_builder: SequenceBiMapper,
attention: AttentionMapper,
post_attention_mapper: Optional[SequenceMapper],
contextual_mapper: SequenceMapperWithContext,
post_context_mapper: Optional[SequenceMapper],
predictor: SequencePredictionLayer):
super().__init__(encoder, word_embed, char_embed, word_embed_layer)
self.question_encoder = question_encoder
self.embed_mapper = embed_mapper
self.question_mapper = question_mapper
self.context_mapper = context_mapper
self.memory_builder = memory_builder
self.contextual_mapper = contextual_mapper
self.attention = attention
self.post_attention_mapper = post_attention_mapper
self.post_context_mapper = post_context_mapper
self.predictor = predictor
def _get_predictions_for(self, is_train,
question_rep, question_mask,
context_rep, context_mask,
answer) -> Prediction:
if self.embed_mapper is not None:
with tf.variable_scope("map_embed"):
context_rep = self.embed_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("map_embed", reuse=True):
question_rep = self.embed_mapper.apply(is_train, question_rep, question_mask)
if self.question_mapper is not None:
with tf.variable_scope("map_question"):
question_rep = self.question_mapper.apply(is_train, question_rep, question_mask)
if self.context_mapper is not None:
with tf.variable_scope("map_context"):
context_rep = self.context_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("build_memories"):
keys, memories = self.memory_builder.apply(is_train, question_rep, question_mask)
with tf.variable_scope("apply_attention"):
context_rep = self.attention.apply(is_train, context_rep, keys, memories, context_mask, question_mask)
if self.post_attention_mapper is not None:
with tf.variable_scope("process_attention"):
context_rep = self.post_attention_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("encode_question"):
question_encoded = self.question_encoder.apply(is_train, question_rep, question_mask)
with tf.variable_scope("map_with_context"):
context_rep = self.contextual_mapper.apply(is_train, context_rep, question_encoded, context_mask)
if self.post_context_mapper is not None:
with tf.variable_scope("process_context_mapped"):
context_rep = self.post_context_mapper.apply(is_train, context_rep, context_mask)
with tf.variable_scope("predict"):
return self.predictor.apply(is_train, context_rep, answer, context_mask)
| apache-2.0 |
google/referring-manipulation | third_party/blended_diffusion/guided_diffusion/guided_diffusion/dist_util.py | 2 | 2424 | """
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
"""
Setup a distributed process group.
"""
if dist.is_initialized():
return
os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
comm = MPI.COMM_WORLD
backend = "gloo" if not th.cuda.is_available() else "nccl"
if backend == "gloo":
hostname = "localhost"
else:
hostname = socket.gethostbyname(socket.getfqdn())
os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
os.environ["RANK"] = str(comm.rank)
os.environ["WORLD_SIZE"] = str(comm.size)
port = comm.bcast(_find_free_port(), root=0)
os.environ["MASTER_PORT"] = str(port)
dist.init_process_group(backend=backend, init_method="env://")
def dev():
"""
Get the device to use for torch.distributed.
"""
if th.cuda.is_available():
return th.device(f"cuda")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
chunk_size = 2 ** 30 # MPI has a relatively small size limit
if MPI.COMM_WORLD.Get_rank() == 0:
with bf.BlobFile(path, "rb") as f:
data = f.read()
num_chunks = len(data) // chunk_size
if len(data) % chunk_size:
num_chunks += 1
MPI.COMM_WORLD.bcast(num_chunks)
for i in range(0, len(data), chunk_size):
MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
else:
num_chunks = MPI.COMM_WORLD.bcast(None)
data = bytes()
for _ in range(num_chunks):
data += MPI.COMM_WORLD.bcast(None)
return th.load(io.BytesIO(data), **kwargs)
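# Typical calling pattern for the helpers in this module (sketch; `model` and
# the checkpoint path are placeholders for objects defined by the caller):
#
#     setup_dist()
#     state = load_state_dict("checkpoint.pt", map_location=dev())
#     model.load_state_dict(state)
#     model.to(dev())
#     sync_params(model.parameters())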
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
| apache-2.0 |
tqchen/tinyflow | example/mnist_lenet.py | 2 | 2061 | """TinyFlow Example: LeNet for Digits classification.
This code uses automatic variable shape inference for shorter code.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist
# Create the model
x = tf.placeholder(tf.float32)
conv1 = tf.nn.conv2d(x, num_filter=20, ksize=[1, 5, 5, 1], name="conv1", no_bias=False)
tanh1 = tf.tanh(conv1)
pool1 = tf.nn.max_pool(tanh1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1])
conv2 = tf.nn.conv2d(pool1, num_filter=50, ksize=[1, 5, 5, 1], name="conv2", no_bias=False)
tanh2 = tf.tanh(conv2)
pool2 = tf.nn.max_pool(tanh2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1])
flatten = tf.nn.flatten_layer(pool2)
fc1 = tf.nn.linear(flatten, num_hidden=500, name="fc1")
tanh3 = tf.tanh(fc1)
fc2 = tf.nn.linear(tanh3, num_hidden=10, name="fc2")
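# Rough shape flow for a 28x28 MNIST image in NCHW layout, assuming unpadded
# 5x5 convolutions and the batch size of 100 used below:
# conv1 (100, 20, 24, 24) -> pool1 (100, 20, 12, 12) -> conv2 (100, 50, 8, 8)
# -> pool2 (100, 50, 4, 4) -> flatten (100, 800) -> fc1 (100, 500) -> fc2 (100, 10)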
# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc2, label)
train_step = tf.train.AdamOptimizer(0.005).minimize(cross_entropy)
sess = tf.Session(config='gpu')
# Automatic variable shape inference API: infers the shapes and initializes the weights.
known_shape = {x: [100, 1, 28, 28], label: [100]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(
cross_entropy, feed_dict=known_shape):
init_step.append(tf.assign(v, tf.normal(shape, stdev)))
print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
sess.run(tf.initialize_all_variables())
# get the mnist dataset
mnist = get_mnist(flatten=False, onehot=False)
print_period = 1000
for epoch in range(10):
sum_loss = 0.0
num_batch = 600
for i in range(num_batch):
batch_xs, batch_ys = mnist.train.next_batch(100)
loss, _ = sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, label:batch_ys})
sum_loss += loss
print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss /num_batch))
correct_prediction = tf.equal(tf.argmax(fc2, 1), label)
accuracy = tf.reduce_mean(correct_prediction)
print(sess.run(accuracy, feed_dict={x: mnist.test.images, label: mnist.test.labels}))
| apache-2.0 |
nelson-liu/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 14 | 2640 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_return_std():
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0])*noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
| bsd-3-clause |
0x0all/nupic | examples/opf/experiments/missing_record/simple_0/description.py | 17 | 1725 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/simple_0.csv'),
'windowSize': 25,
'modelParams': {
'sensorParams': {
'verbosity': 0,
'encoders': {
'timestamp_timeOfDay': None,
'timestamp_dayOfWeek': None,
'field2': None,
}
},
'clParams': {
'clVerbosity': 0,
}
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
0x0all/nupic | py/regions/ImageSensor.py | 2 | 128845 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines ImageSensor, an extensible sensor for images.
"""
import os
import re
import sys
import shutil
import inspect
import cPickle as pickle
import copy
from base64 import b64encode, b64decode
from unicodedata import normalize
from PyRegion import PyRegion
import numpy
from PIL import (Image,
ImageChops,
ImageDraw)
from nupic.bindings.math import GetNTAReal
RealNumpyDType = GetNTAReal()
#from nupic.regions.PyRegion import (PyNode,
# NodeSpec,
# NodeSpecItem,
# RealTypeName,
# RealNumpyDType)
#from nupic.regions import PyNode as PyNodeModule
from nupic.image import (serializeImage,
deserializeImage,
imageExtensions)
def containsConvolutionPostFilter(postFilters):
"""Determine if the post filters contain a convolution filter"""
for p in postFilters:
if p[0].endswith('Convolution'):
return True
return False
class ImageSensor(PyRegion):
"""
ImageSensor is an extensible sensor for grayscale and black and white images.
It uses 'filter' and 'explorer' plugins to do advanced image processing and
training.
It loads images either from files on disk or directly from the Numenta tools.
There are several commands for loading images:
- loadSingleImage, for loading a single image file from disk
- loadMultipleImages, for loading multiple image files from disk
- loadSerializedImage, for receiving a serialized image directly
The loadSingleImage and loadMultipleImages commands don't actually load images
into memory until the images are needed. Furthermore, the filters (see below)
are not run until needed. This keeps ImageSensor's memory usage low, making
it possible to use large datasets and run many filters.
There is also a 'memoryLimit' parameter, which caps the total amount of
memory to be used for storing images. ImageSensor will automatically unload
images and filter outputs as necessary to stay within the limit.
ImageSensor does not necessarily present each image to the bottom nodes of
the network once; rather, the explorer plugin dictates the movement of the
sensor across the image. Typically, the sensor will sweep over each image for
many iterations, in order to help the network generate invariance.
The filter plugins are located in regions/ImageSensorFilters. Bundled filters
include scaling, contrast normalization, and Gabor filters. To make a new
filter, subclass BaseFilter (using the other filters as examples), and drop
the new filter in the ImageSensorFilters directory.
The explorer plugins, located in regions/ImageSensorExplorers, control how
the sensor moves through the set of input images, their filtered versions,
and possible locations of the sensor window on the image. You may create new
explorers by subclassing BaseExplorer and putting them in the
ImageSensorExplorers directory. The documentation for the BaseExplorer class
contains detailed information on explorers.
Only the 'width', 'height', and 'depth' parameters need to be set when the
sensor is first constructed, though other parameters may be set at that time.
Some ImageSensor parameters may need to be changed for each level of
level-by-level training. For example, Pictures trains a single node at each
level and then clones the trained node to all other nodes at the same level.
Only some bottom-level nodes are enabled, except when training the top node.
Thus, the Pictures example changes ImageSensor's enabledWidth and
enabledHeight parameters with each level of training. In many cases, users
may wish to change the explorer for each level of training.
Some explorers can calculate ahead of time how many iterations will be
necessary to explore all the images, though explorers that move randomly
cannot. If you are using one of the deterministic explorers, such as
ExhaustiveSweep, you can use ImageSensor's numIterations parameter to get the
total number of iterations for the loaded images. Changing the explorer or
images will change the number of iterations, so make sure to check the
parameter right before running the network.
The simplest explorer, 'Flash', presents each image once without any
sweeping. If you have trained a network with a different explorer and wish to
perform "flash inference" for testing, just set the explorer to 'Flash'.
All of ImageSensor's public commands and parameters are documented through
its NodeSpec. You can view the NodeSpec with the following Python commands:
from nupic.network import nodeHelp
nodeHelp("py.ImageSensor")
"""
def _init(self, width=1, height=1, depth=1, mode='gray',
blankWithReset=False, background=255, invertOutput=False,
filters=[], postFilters=[], explorer="Flash",
categoryOutputFile="", logText=False, logOutputImages=False,
logOriginalImages=False, logFilteredImages=False,
logLocationImages=False, logLocationOnOriginalImage=False,
logBoundingBox=False, logDir="imagesensor_log",
automaskingTolerance=0, automaskingPadding=0, memoryLimit=100,
minimalBoundingBox=False, dataOut=None, categoryOut=None,
partitionOut=None, resetOut=None, bboxOut=None, alphaOut=None,
useAux=False, auxDataOut=None, auxDataWidth=None, **keywds):
"""
width -- Width of the sensor's output to the network (pixels).
height -- Height of the sensor's output to the network (pixels).
depth -- Optional parameter used to send multiple versions of an image
out at the same time.
mode -- Current options are 'gray' (8-bit grayscale) and 'bw' (1-bit
black and white).
blankWithReset -- ** DEPRECATED ** Whether to send a blank output every
time the explorer generates a reset signal (such as when beginning
a new sweep). Turning on blanks increases the number of iterations.
background -- Pixel value of background, used for padding a cropped
image, and also for finding the bounding box in the absence of a mask.
invertOutput -- Inverts the output of the node (e.g. white pixels
become black).
filters -- List of filters to apply to each image. Each element in
the list should be either a string (just the filter name) or a list
containing both the filter name and a dictionary specifying its
arguments.
explorer -- Explorer (used to move the sensor through the input
space). Specify as a string (just the explorer name) or a list
containing both the explorer name and a dictionary specifying its
arguments.
categoryOutputFile -- Name of file to which to write category number
on each compute (useful for analyzing network accuracy after inference).
logText -- Toggle for verbose logging to imagesensor_log.txt.
logOutputImages -- Toggle for writing each output to disk (as an image)
on each iteration.
logOriginalImages -- Toggle for writing the original, unfiltered version
of the current image to disk on each iteration.
logFilteredImages -- Toggle for writing the intermediate versions of
images to disk as they pass through the filter chain.
logLocationImages -- Toggle for writing an image to disk on each
iteration which shows the location of the sensor window.
logLocationOnOriginalImage -- Whether to overlay the location rectangle
on the original image instead of the filtered image. Does not work if
the two images do not have the same size, and may be nonsensical
even if they do (for example, if a filter moved the object within the
image).
logBoundingBox -- Toggle for writing a log containing the bounding
box information for each output image.
automaskingTolerance -- Affects the process by which bounding box masks
are automatically generated from images based on similarity to the
specified 'background' pixel value. The bounding box will enclose all
pixels in the image that differ from 'background' by more than
the value specified in 'automaskingTolerance'. Default is 0, which
generates bounding boxes that enclose all pixels that differ at all
from the background. In general, increasing the value of
'automaskingTolerance' will produce tighter (smaller) bounding box masks.
automaskingPadding -- Affects the process by which bounding box masks
are automatically generated from images. After computing the
bounding box based on image similarity with respect to the background,
the box will be expanded by 'automaskingPadding' pixels in all four
directions (constrained by the original size of the image.)
memoryLimit -- Maximum amount of memory that ImageSensor should use
for storing images, in megabytes. ImageSensor will unload images and
filter outputs to stay beneath this ceiling. Set to -1 for no limit.
minimalBoundingBox -- Whether the bounding box found by looking at the
image background should be set even if it touches one of the sides of
the image. Set to False to avoid chopping edges off certain images, or
True if that is not an issue and you wish to use a sweeping explorer.
dataOut -- The output element count of the 'dataOut' output.
categoryOut -- The output element count of the 'categoryOut' output (NuPIC 1 only).
resetOut -- The output element count of the 'resetOut' output (NuPIC 1 only).
bboxOut -- The output element count of the 'bboxOut' output (NuPIC 1 only).
alphaOut -- The output element count of the 'alphaOut' output (NuPIC 1 only)
auxDataWidth -- The output element count of the 'auxData' output (NuPIC2 only).
"""
PyRegion.__init__(self, **keywds)
# Validate the output element counts
if dataOut:
if hasattr(dataOut, "__iter__"):
if ([1] * (3 - len(dataOut)) + list(dataOut)) == [depth, height, width]:
pass
elif dataOut == (depth * height * width):
pass
else:
if not containsConvolutionPostFilter(postFilters):
raise RuntimeError("The 'dataOut' output element count must be equal"
" to depth * height * width.")
# In NuPIC 2, these are all None
if categoryOut and categoryOut != 1:
raise RuntimeError("The 'categoryOut' output element count must be 1.")
if partitionOut and partitionOut != 1:
raise RuntimeError("The 'partitionOut' output element count must be 1.")
if resetOut and resetOut != 1:
raise RuntimeError("The 'resetOut' output element count must be 1.")
if bboxOut and bboxOut != 4:
raise RuntimeError("The 'bboxOut' output element count must be 4.")
if alphaOut and alphaOut != width * height:
raise RuntimeError("The 'alphaOut' output element count must be equal "
"to width * height")
self.useAux = useAux
self.width = width
self.height = height
self.depth = depth
self.mode = mode
self.blankWithReset = blankWithReset
self.background = background
self.automaskingTolerance = automaskingTolerance
self.automaskingPadding = automaskingPadding
if self.mode == 'bw' and self.background != 0:
self.background = 255
self.invertOutput = invertOutput
self.categoryOutputFile = categoryOutputFile
self.logFile = None
self.bboxLogFile = None
self.logText = logText
self.logOutputImages = logOutputImages
self.logOriginalImages = logOriginalImages
self.logFilteredImages = logFilteredImages
self.logLocationImages = logLocationImages
self.logLocationOnOriginalImage = logLocationOnOriginalImage
self.logBoundingBox = logBoundingBox
self.logDir = logDir
self.memoryLimit = memoryLimit
self.minimalBoundingBox = minimalBoundingBox
self.enabledWidth = self.width
self.enabledHeight = self.height
# The imageList data structure contains all the information about all the
# images which have been loaded via any of the load* methods. Some images
# may not be in memory, but their metadata is always kept in imageList.
# imageList[imageIndex] returns all the information about the image with
# the specified index, in a dictionary. The keys in the dictionary are:
# 'image': The unfiltered image.
# 'imagePath': The path from which the image was loaded.
# 'maskPath': The path from which the mask was loaded.
# 'categoryName': The name of the image's category.
# 'categoryIndex': The index of the image's category.
# 'filtered': A dictionary of filtered images created from this image.
# In general, images are only loaded once they are needed. But if an image
# is loaded via loadSerializedImage, then its entry in imageList has an
# 'image' value but no 'imagePath' value. Thus, it will never be deleted
# from memory because it cannot be recovered. All other images are fair
# game.
# The 'filtered' dictionary requires more explanation. Each key in the
# dictionary is a tuple specifying the positions of the filters that
# generated the image. (Filters can generate multiple outputs, so an
# image that comes out of the filter pipeline must be referenced by its
# position in the outputs of each filter in the pipeline). The dictionary
# also contains images that have been run through only part of the filter
# pipeline, which are kept around for use as inputs for the remaining
# filters.
# Here is an example with 3 filters in the pipeline:
# 0: A Resize filter that generates 3 outputs (small, medium, large)
# 1: An EqualizeHistogram filter that generates 1 output
# 2: A Rotation2D filter that generates 5 outputs (5 rotation angles)
# A typical key for an image would be (0, 0, 2), specifying the smallest
# scale from the Resize filter (0), the only output from the
# EqualizeHistogram filter (0), and the middle rotation angle (2).
# Another valid key would be (1,), specifying an image that has gone through
# the Resize filter (the middle scale), but which has not been through
# the other filters yet. This image would never be shown to the network,
# but it would be used by ImageSensor to compute other images.
# The _getFilteredImages method is the only method which directly accesses
# the filtered images in imageList. Filtering is only done on-demand.
# If _getFilteredImages is called and the requested images have not yet
# been created, _applyFilter is called to run each filter, and the
# resulting images are stored in imageList for later use. They may be
# deleted due to the memoryLimit parameter, in which case they will be
# recreated later if necessary.
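# As a purely illustrative sketch of the structure described above (all field
# values are invented), a single entry might look like:
# self._imageList[i] == {
#   'image': <PIL image or None>, 'imagePath': 'data/cat/img01.png',
#   'maskPath': None, 'categoryName': 'cat', 'categoryIndex': 0,
#   'filtered': {(0,): [<small>], (0, 0): [<small, equalized>],
#                (0, 0, 2): [<small, equalized, 3rd rotation>]}, ...}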
self._imageList = []
self.categoryInfo = [] # (categoryName, canonicalImage) for each category
self._imageQueue = [] # Queue of image indices for managing memory
self._filterQueue = [] # Queue of filter outputs for managing memory
self._pixelCount = 0 # Count of total loaded pixels for managing memory
self.outputImage = None # Copy of the last image sent to the network
self.locationImage = None # Copy of the location image for the last output
self.prevPosition = None # Position used for the last compute iteration
self._categoryOutputFile = None # To write the category on each iteration
self._iteration = 0 # Internal iteration counter
self.explorer = None
self._setFilters(filters)
self._setPostFilters(postFilters)
self._setExplorer(explorer)
self._holdForOffset = 0
self._cubeOutputs = not containsConvolutionPostFilter(postFilters)
self._auxDataWidth = auxDataWidth
def __init__(self, *args, **kw):
self._init(*args, **kw)
def loadSingleImage(self, imagePath, maskPath=None, categoryName=None,
clearImageList=True, skipExplorerUpdate=False, auxPath=None, userAuxData=None,
sequenceIndex=None, frameIndex=None):
"""
Add the specified image to the list of images.
Images are not loaded into memory until they are needed.
imagePath -- Path to the image to load.
auxPath -- Path to the auxiliary data for the image.
maskPath -- Path to the mask to load with this image.
categoryName -- Name of the category of this image.
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
sequenceIndex -- Unique sequence index.
frameIndex -- The frame number within the sequence.
"""
if categoryName is not None and type(categoryName) is not str:
categoryName = str(categoryName)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
if userAuxData is not None:
manualAux = True
else:
manualAux = False
self._addImage(imagePath=imagePath, maskPath=maskPath,
categoryName=categoryName, auxPath=auxPath, manualAux = manualAux,
userAuxData=userAuxData, sequenceIndex=sequenceIndex, frameIndex=frameIndex)
if not skipExplorerUpdate:
self.explorer[2].update(numImages=len(self._imageList))
self._logCommand([('index', len(self._imageList)-1)])
if clearImageList:
self.explorer[2].first()
return self.getParameter('numImages'), self.getParameter('numMasks')
def loadSpecificImages(self, imagePaths, categoryNames=None,
clearImageList=True):
"""
Add multiple images to the list of images.
See loadMultipleImages to load images which have been organized by
category on disk.
This command is equivalent to calling loadSingleImage repeatedly, but it
is faster because it avoids updating the explorer between each image, and
because it only involves one call to the runtime engine.
imagePaths -- List with the path of each image.
categoryNames -- Category name for each image (or can be a single string
with the category name that should be applied to all images).
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
"""
if categoryNames is not None and isinstance(categoryNames, basestring):
categoryNames = [categoryNames] * len(imagePaths)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
for i in xrange(len(imagePaths)):
if categoryNames is not None:
categoryName = categoryNames[i]
else:
categoryName = None
self.loadSingleImage(imagePath=imagePaths[i],
categoryName=categoryName,
clearImageList=False,
skipExplorerUpdate=True)
self.explorer[2].update(numImages=len(self._imageList))
return self.getParameter('numImages'), self.getParameter('numMasks')
def _walk(self, top):
"""
Directory tree generator lifted from python 2.6 and then
stripped down. It improves on the 2.5 os.walk() by adding
the 'followlinks' capability.
"""
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except OSError, e:
raise RuntimeError("Unable to get a list of files due to an OS error.\nDirectory: "+top+"\nThis may be due to an issue with Snow Leopard.")
#raise
except:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
yield top, dirs, nondirs
for name in dirs:
path = os.path.join(top, name)
for x in self._walk(path):
yield x
def loadMultipleImages(self, imagePath, extension=None, maskPath=None,
first=None, last=None, subsample=1, clearImageList=True,
strictMaskLocations=True, categoryNameFilter=None, pattern=None,
skipInterval=None, skipOffset=None, useCategories=True, auxPath=None,
auxType=None):
"""
Add images from multiple categories to the list of images.
Images are not loaded into memory until they are needed.
imagePath -- Path from which to load the images (see note below).
auxPath -- Path from which to load the auxiliary data for each image.
auxType -- Type of auxiliary data: the file extension.
extension -- Extension of images files to accept (or None to accept all
valid images).
maskPath -- Path from which to load masks that correspond to the loaded
images (see note below).
first -- Index of the first image in each category to load. If
first == 1, ImageSensor skips the first image and loads the rest.
last -- Index of the last image in each category to load. If
last == 1, ImageSensor loads the first two images.
subsample -- ImageSensor loads 1/subsample of the images in each
category. If subsample == 3, loads the first image, the fourth, the
seventh, and so on.
clearImageList -- If True, ImageSensor removes all loaded images when
loading these new images. If False, the images loaded by this
method will be appended to the existing list of images.
strictMaskLocations -- If True, ImageSensor will only load masks whose
path (in the mask directory) exactly parallels the path to the image
(in the image directory). If False, ImageSensor will attempt to find
the masks even if there aren't category subdirectories, or all the
mask files are in the root mask directory, etc.
categoryNameFilter -- String or list of strings that will be matched
against category names. Only categories that match one of the strings
will be processed. Each string can be a regular expression.
pattern -- Regular expression for filtering images. Only images which
match the regular expression (via re.search()) will be accepted.
The path provided to pattern is the absolute path to the image file.
skipInterval -- The inverse of 'subsample'; this parameter directs
ImageSensor to skip every Nth image that it would otherwise load.
For example, if 'skipInterval' is 2, then ImageSensor will load
only every other image. The default is None, which directs
ImageSensor to skip no images. Note that a 'skipInterval' of 1
implies skipping every image, which is nonsensical; therefore,
non-None values of 'skipInterval' which are less than 2 cause
RuntimeErrors to be raised.
skipOffset -- Operates in conjunction with 'skipInterval'. Specifies
an offset to use for the purpose of skipping. For example, if 'skipInterval'
was 10 (skip every 10th image) and 'skipOffset' was 0 (or None), then
the first 9 images would be loaded, the 10th would be skipped, etc.
But if 'skipOffset' were 2, then the first 7 images would be loaded,
the 8th skipped, the next 9 loaded, the 17th skipped, etc. Defaults
to None (equivalent to zero.) If both 'skipOffset' and 'skipInterval'
are non-None, then 'skipOffset' must be non-negative and less than
'skipInterval'.
useCategories -- True for normal behavior, or False to load any image found
in imagePath, without looking for nested directory folders.
Returns a tuple containing the number of images loaded and the number of
masks loaded.
This method expects a directory structure like the following:
imagePath/categoryName1/image01.ext
image02.ext
categoryName2/image01.ext
image02.ext
Optionally, images can be nested arbitrarily deep. For instance:
imagePath/categoryName1/objectName1/image01.ext
image02.ext
objectName2/image01.ext
image02.ext
categoryName2/objectName1/image01.ext
A depth-first search is performed to find images.
Directories and images are processed in sorted order.
The nested directory structure with category names is necessary, but the
names of the image files are unimportant.
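For example (hypothetical path), the following call loads every other .png
image from the first 100 images in each category:
loadMultipleImages('/path/to/images', extension='png', last=99, skipInterval=2)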
"""
if first is not None and type(first) != int:
raise RuntimeError("'first' must be None or a nonnegative integer")
if last is not None and type(last) != int:
raise RuntimeError("'last' must be None or a nonnegative integer")
if subsample is not None and type(subsample) != int:
raise RuntimeError("'subsample' must be None or a positive integer")
if skipInterval is not None and (type(skipInterval) != int
or skipInterval < 2):
raise RuntimeError("'skipInterval' must be None or an integer >= 2")
if skipOffset is not None and skipInterval is not None and \
(type(skipOffset) != int or skipOffset < 0
or skipOffset >= skipInterval):
raise RuntimeError("'skipOffset' must be None or a non-negative integer "
"< 'skipInterval'")
self._logCommand()
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
if self.logFilteredImages:
if clearImageList and os.path.exists(filterLogDir):
shutil.rmtree(filterLogDir)
if self.filters and not os.path.exists(filterLogDir):
os.makedirs(filterLogDir)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
if extension:
# Only look for the extension specified by the user
if not extension.startswith('.'):
extension = '.' + extension
extensions = [extension]
else:
extensions = imageExtensions
# NTA_DATA_DIR may be set in the autotest environment
if "NTA_DATA_DIR" in os.environ and not os.path.abspath(imagePath):
imagePath = os.path.join(os.environ["NTA_DATA_DIR"], imagePath)
print "ImageSensor: looking for data in NTA_DATA_DIR=%s" % os.environ["NTA_DATA_DIR"]
imagePath = os.path.abspath(imagePath)
if auxPath is not None:
if type(auxPath) is not list:
auxPath = [auxPath]
for k in range(0, len(auxPath)):
auxPath[k] = os.path.abspath(auxPath[k])
if maskPath:
maskPath = os.path.abspath(maskPath)
if not os.path.exists(maskPath):
maskPath = None
# Convert 'first', 'last', and 'subsample' to proper Python names/format
start = first
stop = None
if last is not None:
stop = last + 1
step = subsample
# Handle skipping images that would otherwise be loaded
if skipOffset is None:
skipOffset = 0
images = []
categoryList = [None]
if useCategories:
# Assume each directory in imagePath is its own category
categoryList = [c for c in sorted(os.listdir(imagePath))
if c[0] != '.' and
os.path.isdir(os.path.join(imagePath, c))]
if categoryList:
# Filter categories if specified
if categoryNameFilter:
# Need to convert to NFC and re-encode to UTF-8 or else paths may not
# match the category filter
categoryList = [normalize('NFC', unicode(c, 'utf8')).encode('utf8')
for c in categoryList]
if isinstance(categoryNameFilter, basestring):
categoryNameFilter = [categoryNameFilter]
else:
categoryNameFilter = list(categoryNameFilter)
# With a large number of categories, the regular expression
# match can be very expensive. Determine whether there are
# any regular expressions in the filter list. If there are,
# then do the full regex match, otherwise just compare strings
# directly.
hasRegex = False
# use a regex to see if it has any regexes
isTextRegex = re.compile("[a-zA-Z_]+")
hasRegex = False in [isTextRegex.match(r) is not None for r in categoryNameFilter]
if not hasRegex:
categoryList = [c for c in categoryList if c in categoryNameFilter]
else:
for i, r in enumerate(categoryNameFilter):
if r[-1] != '$':
categoryNameFilter[i] += '$'
matchers = [re.compile(r) for r in categoryNameFilter]
categoryList = [c for c in categoryList if True in
[r.match(c) is not None for r in matchers]]
for category in categoryList:
skipCounter = skipOffset
# Call loadSingleImage on every image with the correct extension at any
# depth, using a depth first search
categoryFilenames = []
if category:
walkPath = os.path.join(imagePath, category)
else:
walkPath = imagePath
category = os.path.split(imagePath)[1]
#if float(".".join([str(x) for x in sys.version_info[:2]])) >= 2.6:
# w = os.walk(walkPath, followlinks=True)
#else:
# w = os.walk(walkPath)
w = self._walk(walkPath)
while True:
try:
dirpath, dirnames, filenames = w.next()
except StopIteration:
break
# Don't enter directories that begin with '.'
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d)
dirnames.sort()
# Ignore files that begin with '.'
filenames = [f for f in filenames if not f.startswith('.')]
# Only load images with the right extension
filenames = [f for f in filenames
if os.path.splitext(f)[1].lower() in extensions]
if pattern:
# Filter images with regular expression
filenames = [f for f in filenames
if re.search(pattern, os.path.join(dirpath, f))]
filenames.sort()
imageFilenames = [os.path.join(dirpath, f) for f in filenames]
# Get the corresponding path to the masks
if maskPath:
maskdirpath = os.path.join(maskPath, dirpath[len(imagePath)+1:])
maskFilenames = [os.path.join(maskdirpath, f) for f in filenames]
if strictMaskLocations:
# Only allow masks with parallel filenames
for i, filename in enumerate(maskFilenames):
if not os.path.exists(filename):
maskFilenames[i] = None
else:
# Find the masks even if the path does not match exactly
for i, filename in enumerate(maskFilenames):
while True:
if os.path.exists(filename):
maskFilenames[i] = filename
break
if os.path.split(filename)[0] == maskPath:
# Failed to find the mask
maskFilenames[i] = None
break
# Try to find the mask by eliminating subdirectories
body, tail = os.path.split(filename)
head, body = os.path.split(body)
while not os.path.exists(head):
tail = os.path.join(body, tail)
head, body = os.path.split(head)
filename = os.path.join(head, tail)
else:
maskFilenames = [None for f in filenames]
# Add our new images and masks to the list for this category
categoryFilenames.extend(zip(imageFilenames, maskFilenames))
# We have the full list of filenames for this category
for f in categoryFilenames[start:stop:step]:
skipCounter += 1
if not skipInterval or skipCounter % skipInterval:
images.append((f[0], f[1], category))
# Load all images and masks
if not hasattr(auxType,'__iter__'):
auxType = [auxType]
if not hasattr(auxPath,'__iter__'):
auxPath = [auxPath]
sequenceInfo = self._computeSequenceInfo(images)
for i in xrange(len(images)):
# Generate the auxiliary data path
imageName = images[i][0].split(imagePath)
if auxPath[0] is not None and len(auxPath)>=1:
currentAuxPath = []
for k in range(0, len(auxPath)):
currentAuxPath.append("".join([auxPath[k],imageName[1]+auxType[k]]))
else:
currentAuxPath = None
self.loadSingleImage(imagePath=images[i][0], maskPath=images[i][1],
categoryName=images[i][2], clearImageList=False,
skipExplorerUpdate=True, auxPath=currentAuxPath,
sequenceIndex=sequenceInfo[i][0], frameIndex=sequenceInfo[i][1])
self.explorer[2].update(numImages=len(self._imageList), sequenceCount=sequenceInfo[-1][0], frameCount=len(self._imageList))
return self.getParameter('numImages'), self.getParameter('numMasks')
def _computeSequenceInfo(self, images):
"""
Generates the set of sequence IDs and frame indexes
for the images in the dataset.
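As an illustration (hypothetical paths), images discovered in the order
cats/seq_a/f0.png, cats/seq_a/f1.png, cats/seq_b/f0.png, dogs/img.png
(with 'cats' and 'dogs' as the category directories) yield the
(sequenceIndex, frameIndex) pairs (0, 0), (0, 1), (1, 0), (2, 0):
each sub-directory of a category forms one sequence, and an image sitting
directly in its category directory becomes a one-frame sequence of its own.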
"""
sequenceInfo = []
seqAlias = None
seqID = -1
frameIndex = -1
for image in images:
parentDir = os.path.split(os.path.split(image[0])[0])[1]
frameIndex += 1
if parentDir == image[2]:
seqID += 1
frameIndex = 0
seqAlias = None
elif parentDir != seqAlias:
seqID += 1
frameIndex = 0
seqAlias = parentDir
sequenceInfo.append((seqID, frameIndex))
return sequenceInfo
def loadSerializedImage(self, s, categoryName=None, clearImageList=True,
info=None, erode=None,
userAuxData=None, auxPath=None, manualAux=False):
"""
Receive a serialized image (as a string) and add to the list of images.
s -- Serialized version of the image.
categoryName -- Name of the category of the image.
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
info -- an optional dict of attribute-value pairs to insert into the
image's .info field, after deserialization
erode -- Use this value for the erode flag (True or False) rather than
calculating it.
To serialize an image before passing it to this command, do the following:
from nupic.image import serializeImage
s = serializeImage(image)
"""
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
self._addImage(image=deserializeImage(s, info), categoryName=categoryName,
erode=erode, userAuxData=userAuxData, auxPath=auxPath,
manualAux=manualAux)
self.explorer[2].update(numImages=len(self._imageList))
if clearImageList:
self.explorer[2].first()
self._meetMemoryLimit()
return self.getParameter('numImages'), self.getParameter('numMasks')
def clearImageList(self, skipExplorerUpdate=False):
"""
Clear the list of images.
"""
self._imageList = []
self._imageQueue = []
self._filterQueue = []
self._pixelCount = 0
self.prevPosition = None
if not skipExplorerUpdate:
self.explorer[2].update(numImages=0)
def seek(self, iteration=None, image=None, filters=None, offset=None,
reset=None, sequenceIndex=None, frameIndex=None):
"""
Seek to the specified iteration, image, filter position, or offset.
iteration -- Iteration number.
image -- Image number.
filters -- Tuple specifying a position for each filter.
offset -- Tuple of integers specifying the offset as (x,y).
sequenceIndex -- The sequence to seek to.
frameIndex -- The frame within a sequence to seek to.
Examples:
seek(0) -- Reset to the first iteration.
seek(image=100, filters=(0,0,..,0)) -- Seek to image 100 and position
0 for each filter.
seek(offset=(0,0)) -- Seek to the original position of the current image.
The 'iteration' parameter cannot be combined with the other parameters.
"""
self._logCommand()
# Combine image, filters, and offset into position
position = None
if image is None and sequenceIndex is not None:
image = self.getIterationFromSequence(sequenceIndex, frameIndex)
if image is not None or filters is not None or offset is not None \
or reset is not None:
position = {'image': image, 'filters': filters, 'offset': offset,
'reset': reset}
# Validate inputs
if iteration is not None and position is not None:
raise RuntimeError("Cannot combine 'iteration' with other arguments")
if iteration is None and position is None:
raise RuntimeError("Must specify at least one argument")
if position is not None:
if position['offset'] and type(position['offset']) is tuple:
position['offset'] = list(position['offset'])
if position['image'] is not None:
if position['image'] < 0:
raise RuntimeError("'image' must be nonnegative")
if position['image'] >= len(self._imageList):
raise RuntimeError("'image' exceeds number of loaded images")
if position['filters'] is not None:
if type(position['filters']) != list:
raise RuntimeError("'filters' must be a list of nonnegative values")
if len(position['filters']) != len(self.filters):
raise RuntimeError("Length of 'filters' does not match numFilters")
# Account for holdFor as best we can. This won't be exact because it doesn't take into
# account the current position within the holdFor
if iteration is not None:
self._holdForOffset = iteration % self.explorer[2].holdFor
iteration //= self.explorer[2].holdFor
self.explorer[2].seek(iteration=iteration, position=position)
def getNumIterations(self, image=None):
"""
Calculate how many samples the explorer will provide.
image -- If None, returns the sum of the iterations for all the loaded
images. Otherwise, image should be an integer specifying the image for
which to calculate iterations.
"""
if image is not None and type(image) != int:
raise RuntimeError("'image' must be None or a nonnegative integer")
return self.explorer[2].getNumIterations(image) * self.explorer[2].holdFor
def getSequenceCount(self):
"""
Calculates how many sequences the sensor will provide.
"""
if self._imageList is None:
return -1
else:
return self._imageList[-1]['sequenceIndex']+1
def getFrameCount(self, sequenceIndex):
"""
Calculates the number of frames in a sequence.
"""
if sequenceIndex<0:
raise RuntimeError("'sequenceIndex' must be a non-negative integer.")
if sequenceIndex>self._imageList[-1]['sequenceIndex']:
raise RuntimeError("'sequenceIndex' out of range.")
if self._imageList is None:
return -1
elif sequenceIndex==self._imageList[-1]['sequenceIndex']:
return self._imageList[-1]['frameIndex']+1
else:
ID = 0
while sequenceIndex>=self._imageList[ID]['sequenceIndex']:
ID+=1
return self._imageList[ID-1]['frameIndex']+1
def getIterationRange(self, sequenceIndex=None):
"""
Returns the range of the iteration numbers for
the given sequence ID. If sequenceIndex is None, then
the total range of iterations is returned.
"""
if sequenceIndex is None:
return 0, len(self._imageList)
else:
startIteration = self.getIterationFromSequence(sequenceIndex)
stopIteration = self.getIterationFromSequence(sequenceIndex, self.getFrameCount(sequenceIndex)-1)
return startIteration, stopIteration
def getIterationFromSequence(self, sequenceIndex, frameIndex=0):
"""
Returns the iteration number for the given
frame in the sequence.
"""
if sequenceIndex<0:
raise RuntimeError("'sequenceIndex' must be a non-negative integer.")
if sequenceIndex>self._imageList[-1]['sequenceIndex']:
raise RuntimeError("'sequenceIndex' out of range.")
if self._imageList is None:
return -1
else:
ID = 0
while sequenceIndex>self._imageList[ID]['sequenceIndex']:
ID+=1
while frameIndex>self._imageList[ID]['frameIndex']:
ID+=1
if self._imageList[ID]['sequenceIndex'] != sequenceIndex:
raise RuntimeError("'frameIndex' out of range.")
return ID
def getSequenceFromIteration(self, iteration):
"""
Returns the sequence information given the
iteration number.
"""
if iteration < 0:
raise RuntimeError("'iteration' must be a non-negative integer.")
if iteration>len(self._imageList):
raise RuntimeError("'iteration' out of range.")
else:
return self._imageList[iteration]['sequenceIndex'], self._imageList[iteration]['frameIndex']
def saveImagesToFile(self, filename):
"""
Save imageList, categoryInfo, and filters to the specified file.
Loads all images and runs all filters first.
This method can be used to speed up image loading when expensive filters
are run. After loading images once and passing them through the filters,
use saveImagesToFile to dump the filtered versions to disk. On subsequent
runs with the same images and filters, call loadImagesFromFile to load
the filtered images and avoid rerunning the filters.
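A hypothetical caching workflow (the cache file name is illustrative):
loadMultipleImages('/path/to/dataset') # filters run lazily, on demand
saveImagesToFile('filtered_cache.pkl') # runs all filters, dumps results to disk
# ... on a later run, with the same images and filters ...
loadImagesFromFile('filtered_cache.pkl')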
"""
# Load all images and run all filters
for i in xrange(len(self._imageList)):
self._applyAllFilters(i)
# Create serializable versions for pickling
sImageList = _serializeImageList(self._imageList)
filters = self.getParameter('filters')
sCategoryInfo = self.getParameter('categoryInfo')
# Pickle serializable objects to file
f = open(filename, 'wb')
pickle.dump((sImageList, filters, sCategoryInfo), f,
protocol=pickle.HIGHEST_PROTOCOL)
f.close()
def loadImagesFromFile(self, filename):
"""
Load from a file created with saveImagesToFile.
Loads imageList and categoryInfo. Also loads the filters used to create
the saved images, and overwrites ImageSensor.filters.
"""
f = open(filename, 'rb')
sImageList, filters, sCategoryInfo = pickle.load(f)
f.close()
self.setParameter('filters', -1, filters)
self._imageList = _deserializeImageList(sImageList)
self.explorer[2].update(numImages=len(self._imageList))
self.setParameter('categoryInfo', -1, sCategoryInfo)
return self.getParameter('numImages'), self.getParameter('numMasks')
def _addImage(self, image=None, imagePath=None, maskPath=None,
categoryName=None, erode=None, userAuxData=None, auxPath=None,
manualAux=False, sequenceIndex=None, frameIndex=None):
"""
Create a dictionary for an image and metadata and add to the imageList.
"""
item = {'image': image,
'imagePath': imagePath,
'auxData': userAuxData,
'auxPath': auxPath,
'manualAux': manualAux,
'maskPath': maskPath,
'erode': True,
'categoryName': categoryName,
'categoryIndex': None,
'partitionID': None,
'filtered': {},
'sequenceIndex': sequenceIndex,
'frameIndex': frameIndex}
self._imageList.append(item)
if erode is not None:
item['erode'] = erode
setErodeFlag = False
else:
setErodeFlag = True
# Look up category index from name
if item['categoryName'] is None:
# Unspecified category
item['categoryName'] = ""
item['categoryIndex'] = -1
else:
# Look up the category in categoryInfo
for i in xrange(len(self.categoryInfo)):
if self.categoryInfo[i][0] == item['categoryName']:
item['categoryIndex'] = i
break
if item['categoryIndex'] is None:
# This is the first image of this category (blank categories ignored)
item['categoryIndex'] = len(self.categoryInfo)
# Load the image in order to use it for categoryInfo
original = self._loadImage(len(self._imageList) - 1, returnOriginal=True,
setErodeFlag=setErodeFlag)
if not image:
self._imageQueue.insert(0, len(self._imageList) - 1)
# Append this category to categoryInfo
self.categoryInfo.append((item['categoryName'], original))
elif image:
# Image is already present, just prepare it
# Not necessary if it was already loaded for categoryInfo
self._loadImage(len(self._imageList) - 1, setErodeFlag=setErodeFlag)
def _loadImage(self, index, returnOriginal=False, setErodeFlag=True, userAuxData=None):
"""
Load an image that exists in the imageList but is not loaded into memory.
index -- Index of the image to load.
returnOriginal -- Whether to return an unmodified version of the image
for categoryInfo.
"""
item = self._imageList[index]
if not item['image']:
# Load the image from disk
f = open(item['imagePath'], 'rb')
item['image'] = Image.open(f)
item['image'].load()
f.close()
# Update the pixel count
self._pixelCount += item['image'].size[0] * item['image'].size[1]
# Extract auxiliary data
if item['manualAux'] is False:
if item['auxPath'] is not None:
if item['auxData'] is None:
# Load the auxiliary data from disk
auxPath = item['auxPath']
numAuxInput = len(auxPath)
for k in range(0,numAuxInput):
if item['auxData'] is None:
item['auxData'] = numpy.fromfile(item['auxPath'][k])
else:
item['auxData'] = numpy.concatenate([item['auxData'],numpy.fromfile(item['auxPath'][k])])
# Extract partition ID if it exists
partitionID = item['image'].info.get('partitionID')
if partitionID is None:
partitionID = -1
item['partitionID'] = int(partitionID)
# Convert to grayscale
if item['image'].mode not in ('L', 'LA'):
if 'A' in item['image'].getbands():
# Convert to grayscale but preserve alpha channel
item['image'] = item['image'].convert('LA')
else:
item['image'] = item['image'].convert('L')
if returnOriginal:
# Keep copy of original image
original = item['image'].copy()
bbox = None
if item['maskPath'] is not None:
# Load the mask image and add it to the image as the alpha channel
# If the image already has an alpha channel, it will be overwritten
f = open(item['maskPath'], 'rb')
mask = Image.open(f)
mask.load()
if mask.mode != 'L':
mask = mask.convert('L')
f.close()
item['image'].putalpha(mask)
elif item['image'].mode != 'LA':
diffImage = ImageChops.difference(item['image'],
ImageChops.constant(item['image'], self.background))
if self.automaskingTolerance:
diffImage = ImageChops.subtract(diffImage,
ImageChops.constant(item['image'],
self.automaskingTolerance))
bbox = diffImage.getbbox()
if not bbox:
bbox = (0, 0, item['image'].size[0], item['image'].size[1])
elif self.automaskingPadding:
bbox = ( max(0, bbox[0] - self.automaskingPadding),
max(0, bbox[1] - self.automaskingPadding),
min(item['image'].size[0], bbox[2] + self.automaskingPadding),
min(item['image'].size[1], bbox[3] + self.automaskingPadding),
)
if not self.minimalBoundingBox:
# Do not use the bounding box found from the background color unless
# it does not touch any of the sides of the image
if not (bbox[0] > 0
and bbox[1] > 0
and bbox[2] < item['image'].size[0]
and bbox[3] < item['image'].size[1]):
# Bounding box was not brought in on all four sides
# Set it back to the full image
bbox = (0, 0, item['image'].size[0], item['image'].size[1])
mask = ImageChops.constant(item['image'], 0)
mask.paste(255, bbox)
item['image'].putalpha(mask)
if setErodeFlag:
# Check if the image has a nonuniform alpha channel
# If so, set the 'erode' option to False, indicating that the alpha
# channel is meaningful and it does not need to be eroded by GaborNode
# to avoid "phantom edges"
# If a bounding box was used to generate the alpha channel, use the box
# directly to avoid the expense of scanning the pixels
if bbox:
# Bounding box was used
# Set to dilate mode if the bounding box doesn't touch any of the edges
if bbox[0] != 0 \
and bbox[1] != 0 \
and bbox[2] != item['image'].size[0] \
and bbox[3] != item['image'].size[1]:
# Nonuniform alpha channel (from bounding box)
item['erode'] = False
else:
extrema = item['image'].split()[1].getextrema()
if extrema[0] != extrema[1]:
# Nonuniform alpha channel
item['erode'] = False
if returnOriginal:
return original
def _applyFilter(self, image, imageIndex, filterIndex):
"""Apply the specified filter to the image."""
filtered = self.filters[filterIndex][2].process(image)
if type(filtered) is not list:
filtered = [filtered]
for i, item in enumerate(filtered):
if type(item) is not list:
filtered[i] = [item]
# Verify that the filter produced the correct number of outputs
outputCount = self.filters[filterIndex][2].getOutputCount()
if type(outputCount) not in (tuple, list):
outputCount = (outputCount, 1)
if len(filtered) != outputCount[0] or \
False in [len(outputs) == outputCount[1] for outputs in filtered]:
raise RuntimeError("The %s filter " % self.filters[filterIndex][0] +
"did not return the correct number of outputs. The number of images " +
"that it returned does not match the return value of the filter's " +
"getOutputCount() method.")
for item in filtered:
for image in item:
# Verify that the image has the correct mode
if image.mode != 'LA':
s = """Filtered image returned by the "%s" filter (index %d) has
illegal mode '%s'. Images must be mode 'LA' (grayscale with alpha
channel containing the mask).""" % (self.filters[filterIndex][0],
filterIndex, image.mode)
if image.mode == 'L':
s += " The filter may have removed the alpha channel."
raise RuntimeError(s)
# Update the pixel count
self._pixelCount += image.size[0] * image.size[1]
if self.logFilteredImages:
# Save filter output to disk
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
path = os.path.join(filterLogDir, '%02d_' % filterIndex +
self.filters[filterIndex][0], '%09d' % imageIndex)
# Create the output directory if it does not exist
if not os.path.exists(path):
os.makedirs(path)
index = 0
pathContents = [x for x in sorted(os.listdir(path)) if re.match('\d', x)]
if pathContents:
index = int(re.match('(\d*)', pathContents[-1]).groups()[0]) + 1
for f in filtered:
if len(f) > 1:
# Simultaneous outputs
for i, image in enumerate(f):
filename = os.path.join(path, '%02d_%02d.png' % (index, i))
image.split()[0].save(filename)
else:
# Single output
filename = os.path.join(path, '%02d.png' % index)
f[0].split()[0].save(filename)
index += 1
return filtered
def _applyPostFilters(self, image, filterIndex=0):
"""
Recursively apply the postFilters to the image and return a list of images.
"""
# Filter the image
raw_output = None
filtered = self.postFilters[filterIndex][2].process(image)
# Handle special case where the post filter wants to control the output
# of the image sensor (e.g. convolution post filters)
if type(filtered) is tuple:
assert len(filtered) == 2
raw_output = filtered[1]
assert type(raw_output) == numpy.ndarray
filtered = filtered[0][0]
# Flatten all responses into a single list
if type(filtered) is not list:
# One response
filtered = [filtered]
else:
if type(filtered[0]) is list:
# Simultaneous responses
filtered2 = []
for responses in filtered:
filtered2.extend(responses)
filtered = filtered2
# Verify that the filter produced the correct number of outputs
outputCount = self.postFilters[filterIndex][2].getOutputCount()
if type(outputCount) in (tuple, list):
if len(outputCount) == 1:
outputCount = outputCount[0]
else:
outputCount = outputCount[0] * outputCount[1]
if len(filtered) != outputCount:
raise RuntimeError("%s postFilter " % self.postFilters[filterIndex][0] +
"did not return the correct number of outputs")
for image in filtered:
# Verify that the image has the correct mode
if image.mode != 'LA':
s = """Filtered image returned by the "%s" postFilter (index %d) has
illegal mode '%s'. Images must be mode 'LA' (grayscale with alpha
channel containing the mask).""" % (self.postFilters[filterIndex][0],
filterIndex, image.mode)
if image.mode == 'L':
s += " The filter may have removed the alpha channel."
raise RuntimeError(s)
if self.logFilteredImages:
# Save intermediate outputs to disk
filterLogDir = os.path.join(self.logDir, 'output_from_post_filters')
path = os.path.join(filterLogDir, '%02d_' % filterIndex +
self.postFilters[filterIndex][0])
# Create the output directory if it does not exist
if not os.path.exists(path):
os.makedirs(path)
# Save the images
if len(filtered) > 1:
for i, image in enumerate(filtered):
name = os.path.join(path, "%09d_%02d.png" % (self._iteration, i))
image.save(name)
else:
name = os.path.join(path, "%09d.png" % self._iteration)
filtered[0].save(name)
if filterIndex == len(self.postFilters) - 1:
return filtered, raw_output
# Concatenate all responses into one flat list of simultaneous responses
responses = []
for image in filtered:
response = self._applyPostFilters(image, filterIndex+1)
if raw_output is not None:
assert (response[1] is None) # Only one post-filter can determine raw_output
responses.extend(response[0])
return responses, raw_output
def _applyAllFilters(self, image=None):
"""
Run all filters on the specified image or all images.
"""
numFilterOutputs = self._getNumFilterOutputs(self.filters)
if image is None:
images = xrange(len(self._imageList))
else:
images = [image]
for image in images:
filterPosition = [0] * len(self.filters)
while True:
self._getFilteredImages({'image': image, 'filters': filterPosition})
for i in xrange(len(self.filters)-1, -1, -1):
filterPosition[i] += 1
if filterPosition[i] == numFilterOutputs[i]:
filterPosition[i] = 0
else:
break
if filterPosition == [0] * len(self.filters):
break
def _getOriginalImage(self, index=None):
"""
Get the specified image, loading it if necessary.
index -- Index of the image to retrieve. Retrieves the current image if
not specified.
"""
if index is None:
index = self.explorer[2].position['image']
if not self._imageList[index]['image']:
# Image needs to be loaded
self._loadImage(index)
return self._imageList[index]['image']
def _getFilteredImages(self, position=None):
"""
Get the filtered images specified by the position.
position -- Position to use. Uses current position if not specified.
"""
if not position:
position = self.explorer[2].position
if not self._imageList[position['image']]['image']:
# Image needs to be loaded
self._loadImage(position['image'])
if not self.filters:
# No filters - return original version
return [self._imageList[position['image']]['image']]
# Iterate through the specified list of filter positions
# Run filters as necessary
allFilteredImages = self._imageList[position['image']]['filtered']
filterPosition = tuple()
for filterIndex, pos in enumerate(position['filters']):
filterPosition += (pos,)
if not filterPosition in allFilteredImages:
# Run the filter
if len(filterPosition) > 1:
# Use the first of the simultaneous responses
imageToFilter = allFilteredImages[filterPosition[:-1]][0]
else:
imageToFilter = self._imageList[position['image']]['image']
# Inject the original image path to the Image's info
# dict in case the filter wants to use it.
imageToFilter.info['path'] = self._imageList[position['image']]['imagePath']
newFilteredImages = self._applyFilter(imageToFilter, position['image'],
filterIndex)
for j, image in enumerate(newFilteredImages):
# Store in the dictionary of filtered images
thisFilterPosition = filterPosition[:-1] + (j,)
allFilteredImages[thisFilterPosition] = image
# Update the filter queue
thisFilterTuple = (position['image'], thisFilterPosition)
if thisFilterTuple in self._filterQueue:
self._filterQueue.remove(thisFilterTuple)
self._filterQueue.insert(0, thisFilterTuple)
# Update the queues to mark this image as recently accessed
# Only mark the original image if it could be loaded from disk again
if self._imageList[position['image']]['imagePath']:
if position['image'] in self._imageQueue:
self._imageQueue.remove(position['image'])
self._imageQueue.insert(0, position['image'])
# Mark all precursors to the current filter
for i in xrange(1, len(position['filters']) + 1):
partialFilterTuple = (position['image'], tuple(position['filters'][:i]))
if partialFilterTuple in self._filterQueue:
self._filterQueue.remove(partialFilterTuple)
self._filterQueue.insert(0, partialFilterTuple)
self._meetMemoryLimit()
return allFilteredImages[filterPosition]
def _getImageInfo(self, imageIndex=None):
"""
Get the dictionary of info for the image, excluding actual PIL images.
imageIndex -- Image index to use. Uses the current position if not specified.
"""
if imageIndex is None:
imageIndex = self.explorer[2].position['image']
item = self._imageList[imageIndex].copy()
item.pop('image')
item.pop('filtered')
return item
def _getOutputImages(self):
"""Get the current image(s) to send out, based on the current position.
A post filter may want to provide the final output of the node. In
this case it will return a non-None final output that the ImageSensor will
use as the output of the node regardless of the output images.
"""
if self.prevPosition['reset'] and self.blankWithReset:
# Blank
return ([Image.new('LA', (self.enabledWidth, self.enabledHeight))] \
* self.depth, None)
else:
# Get the image(s) to send out
allImages = self._getFilteredImages()
# Calculate a scale factor in each dimension for adjusting the offset
scaleX = [image.size[0] / float(allImages[0].size[0])
for image in allImages]
scaleY = [image.size[1] / float(allImages[0].size[1])
for image in allImages]
offset = self.explorer[2].position['offset']
# Normally, the enabledSize is smaller than the sensor size. But, there are some
# configurations where the user might want to explore in a larger size, then run
# it through a post-filter to get the end sensor size (for example, when using a
# fish-eye post filter). If we detect that the enabledSize is greater than the
# sensor size, then change our crop bounds
dstImgWidth = max(self.width, self.enabledWidth)
dstImgHeight = max(self.height, self.enabledHeight)
# Cut out the relevant part of each image
newImages = []
for i, image in enumerate(allImages):
x = int(offset[0] * scaleX[i])
y = int(offset[1] * scaleY[i])
cropBounds = (max(0, x),
max(0, y),
min(x + dstImgWidth, image.size[0]),
min(y + dstImgHeight, image.size[1]))
croppedImage = image.crop(cropBounds)
newImage = Image.new(croppedImage.split()[0].mode,
(dstImgWidth, dstImgHeight),
self.background)
if newImage.mode == 'L':
newImage.putalpha(Image.new('L', newImage.size))
newImage.paste(croppedImage, (max(0, -x), max(0, -y)))
newImages.append(newImage)
# Crop the shifted images back to the enabled size
croppedImages = [image.crop((0, 0,
int(round(self.enabledWidth * scaleX[i])),
int(round(self.enabledHeight * scaleY[i]))))
for i, image in enumerate(newImages)]
# Filter through the post filters
final_output = None
if self.postFilters:
newCroppedImages = []
for i in xrange(len(croppedImages)):
(responses, raw_output) = self._applyPostFilters(croppedImages[i])
if raw_output is not None:
assert final_output is None
final_output = raw_output
while type(responses[0]) == list:
responses = responses[0]
newCroppedImages.extend(responses)
croppedImages = newCroppedImages
# Check that the number of images matches the depth
if len(croppedImages) != self.depth:
raise RuntimeError("The filters and postFilters created %d images to"
" send out simultaneously, which does not match ImageSensor's"
" depth parameter, set to %d." % (len(croppedImages), self.depth))
# Invert output if necessary
if self.invertOutput:
if croppedImages[0].mode == '1':
croppedImages = [ImageChops.invert(image) for image in croppedImages]
else:
for i, croppedImage in enumerate(croppedImages):
grayscale = croppedImage.split()[0]
alpha = croppedImage.split()[1]
inverted = ImageChops.invert(grayscale)
inverted.putalpha(alpha)
croppedImages[i] = inverted
return (croppedImages, final_output)
def _logCommand(self, reportList=None, argList='auto'):
"""
Print information about the calling command to the ImageSensor log file.
Without arguments, prints the calling command's name and arguments. Add
extra information to print with reportList. If necessary, override the
list of arguments with argList.
reportList -- Extra data to print, as a list of tuples (like an
ordered dictionary).
argList -- Arguments to print, as a list of tuples. Default value
'auto' specifies that they should be obtained automatically.
ImageSensor has a very specific log file format that is machine-readable.
A typical section looks like this:
('seek', {'iteration': 0, 'image': None, 'position': None}, {})
('compute', {}, {'iteration': 0, 'position': {'image': 0,
'filters': [0,0,0], 'offset': [0,0]}, 'isBlank': False})
The log snippet above indicates that the 'seek' command was issued, with
the argument iteration=0. The command executed and has nothing extra to
report. Then the runtime engine called 'compute'. The compute command
reports back that this was iteration 0, and the explorer chose image 0,
filter position [0,0,0], and offset [0,0].
Since each call generates one line of properly-formatted Python code, it
is easy to read in the report file and parse it with a Python script.
Does not print if self.logText is False. Opens the file if necessary.
"""
if not self.logText:
return
# Set up the log directory and log file if necessary
if not os.path.exists(self.logDir):
os.makedirs(self.logDir)
if self.logFile is None:
self.logFile = open(os.path.join(self.logDir, 'imagesensor_log.txt'), 'w')
# Get the caller's name
callerInfo = inspect.stack()[1]
callerName = callerInfo[3]
# Automatically get the caller's arguments, unless they were specified
if argList == 'auto':
callerFrame = callerInfo[0]
callerArgs, a, k, callerLocals = inspect.getargvalues(callerFrame)
argList = [(name, callerLocals[name]) for name in callerArgs
if name != 'self']
# Create strings to print
# argList and reportList are lists of tuples
# Convert each into a string form of a dictionary, but preserve the order
argStr = reportStr = {}
if argList:
argStr = '{'
for key, value in argList:
argStr += "'%s': %s, " % (key, repr(value))
argStr = argStr[:-2] + '}'
if reportList:
reportStr = '{'
for key, value in reportList:
reportStr += "'%s': %s, " % (key, repr(value))
reportStr = reportStr[:-2] + '}'
# Print to the file
print >>self.logFile, '(%s, %s, %s)' \
% (repr(callerName), argStr, reportStr) + os.linesep
self.logFile.flush()
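  # A minimal sketch of reading the log back, assuming the default file name
  # 'imagesensor_log.txt' and that all logged values are plain Python literals
  # (each non-blank line is a "(commandName, argDict, reportDict)" tuple;
  # blank lines come from the extra os.linesep written above):
  #
  #   import ast, os
  #
  #   def read_imagesensor_log(logDir='imagesensor_log'):
  #     entries = []
  #     with open(os.path.join(logDir, 'imagesensor_log.txt')) as f:
  #       for line in f:
  #         line = line.strip()
  #         if line:
  #           entries.append(ast.literal_eval(line))
  #     return entries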
def _logOutputImages(self):
"""
Save the output images to disk.
"""
# Create output directory if necessary
outputLogDir = os.path.join(self.logDir, 'output_to_network')
if not os.path.exists(outputLogDir):
os.makedirs(outputLogDir)
# Save the sensor's output images
if self.depth > 1:
for i in xrange(self.depth):
outputImageName = "%09d_%02d.png" % (self._iteration, i)
name = os.path.join(outputLogDir, outputImageName)
self.outputImage[i].split()[0].save(name)
else:
outputImageName = "%09d.png" % self._iteration
name = os.path.join(outputLogDir, outputImageName)
self.outputImage.split()[0].save(name)
def _logBoundingBox(self, bbox):
"""
Log the current bounding box
"""
# Create the log directory and log file if necessary
if not os.path.exists(self.logDir):
os.makedirs(self.logDir)
if self.bboxLogFile is None:
self.bboxLogFile = open(os.path.join(self.logDir, 'imagesensor_bbox_log.txt'), 'w')
# Log the bounding box
print >>self.bboxLogFile, '%d %d %d %d' % (bbox[0], bbox[1], bbox[2], bbox[3])
self.bboxLogFile.flush()
def _logOriginalImage(self):
"""
Save the original, unfiltered image to disk.
"""
# Create output directory if necessary
originalLogDir = os.path.join(self.logDir, 'original_images')
if not os.path.exists(originalLogDir):
os.makedirs(originalLogDir)
# Save the original image
originalImageName = "%09d.png" % self._iteration
image = self._getOriginalImage().split()[0]
image.save(os.path.join(originalLogDir, originalImageName))
def _logLocationImage(self):
"""
Save the location of the sensor window to disk (as an image).
"""
# Create output directory if necessary
locationLogDir = os.path.join(self.logDir, 'output_locations')
if not os.path.exists(locationLogDir):
os.makedirs(locationLogDir)
# Save the location image
if not self.locationImage:
self.locationImage = self._createLocationImage()
locationImageName = "%09d.png" % self._iteration
self.locationImage.save(os.path.join(locationLogDir, locationImageName))
def _createLocationImage(self):
"""
Create the 'location' image, with a rectangle around the sensor window.
"""
if self.prevPosition['reset'] and self.blankWithReset:
# Create a blank image
locationImage = Image.new('1', (self.width, self.height))
if self.invertOutput:
locationImage = ImageChops.invert(locationImage)
else:
# Get the filtered image
firstImage = self._getFilteredImages(self.prevPosition)[0]
# Select backdrop upon which sensor position will be overlaid
if self.logLocationOnOriginalImage:
filteredImage = firstImage
firstImage = self._getOriginalImage(self.prevPosition['image'])
if firstImage.size != filteredImage.size:
raise RuntimeError("logLocationOnOriginalImage is True, but the"
" filtered image does not match the size of the original"
" image, so the location image would be invalid")
locationImage = Image.new('RGB', firstImage.size)
locationImage.paste(firstImage, (0,0))
locationImageDraw = ImageDraw.Draw(locationImage)
x, y = self.prevPosition['offset']
x2, y2 = x + self.enabledWidth - 1, y + self.enabledHeight - 1
locationImageDraw.rectangle((x-1, y-1, x2+1, y2+1), outline='red')
if locationImage.size[0] > 32 or locationImage.size[1] > 32:
# Draw again to create a thicker border
locationImageDraw.rectangle((x-2, y-2, x2+2, y2+2), outline='red')
return locationImage
def _writeCategoryToFile(self, category):
"""
Write the specified category index to the file at self.categoryOutputFile.
category -- Category index (integer).
"""
if self.categoryOutputFile: # Only write if we have a valid filename
if not self._categoryOutputFile:
self._categoryOutputFile = open(self.categoryOutputFile, 'w')
# Write a 1 to the first line to specify one entry per line
self._categoryOutputFile.write('1' + os.linesep)
self._categoryOutputFile.write(str(category) + os.linesep)
self._categoryOutputFile.flush()
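  # A minimal sketch of reading the category file back, assuming the format
  # written above (a '1' header line meaning one entry per line, then one
  # integer category index per compute):
  #
  #   def read_category_file(path):
  #     with open(path) as f:
  #       lines = [line.strip() for line in f if line.strip()]
  #     assert lines[0] == '1'   # header: one entry per line
  #     return [int(line) for line in lines[1:]]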
def _setFilters(self, filters):
"""
Change one or more filters, and recompute the ones that changed.
filters -- List of filters, where each filter is a list [classname,
parameters] (or just a string with the class name, if the filter needs
no parameters).
Filters should be located in the regions/ImageSensorFilters directory.
"""
if filters:
if not isinstance(filters, list):
raise TypeError("'filters' must be a list of one or more filters")
if isinstance(filters, list) and len(filters) == 2 \
and isinstance(filters[1], dict):
raise TypeError("'filters' must be a _list_ of filters. If you "
"are specifying a filter with the [name, {args}] "
"syntax, nest it within a list: [[name, {args}]]")
filters = copy.deepcopy(filters)
if self.logFilteredImages:
# Remove the filter log directory if it exists
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
if os.path.exists(filterLogDir):
shutil.rmtree(filterLogDir)
if filters is None:
filters = []
elif type(filters) is tuple:
filters = list(filters)
for i, filter in enumerate(filters):
if type(filter) is str:
filters[i] = [filter, {}]
elif type(filter) is tuple:
filters[i] = list(filters[i])
if len(filters[i]) == 1:
filters[i].append({})
self.filters = filters
self._importFilters(self.filters)
# Validate no filter except the last returns simultaneous responses
for i in xrange(len(self.filters)-1):
outputCount = self.filters[i][2].getOutputCount()
if type(outputCount) in (tuple, list) and len(outputCount) > 1 \
and outputCount[1] > 1:
raise RuntimeError("Only the last filter can return a nested list of "
"images (multiple simultaneous responses). "
"The %s filter, " % self.filters[i][0] +
"index %d of %d, " % (i, len(self.filters)-1) +
"creates %d simultaneous responses." % outputCount[1])
# Invalidate the filtered versions of all images
for item in self._imageList:
if item['filtered']:
item['filtered'] = {}
self._filterQueue = []
    # Update the pixel count to only count the original images
self._pixelCount = 0
for i in self._imageQueue:
image = self._imageList[i]['image']
self._pixelCount += image.size[0] * image.size[1]
# Tell the explorer about these new filters
if type(self.explorer) == list and len(self.explorer) > 2:
self.explorer[2].update(numFilters=len(filters),
numFilterOutputs=self._getNumFilterOutputs(self.filters))
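  # A minimal usage sketch for the filter syntax accepted above. The filter
  # names and arguments here are illustrative placeholders, not a definitive
  # list of available ImageSensorFilters:
  #
  #   sensor.setParameter('filters', -1, [
  #     'PadToFit',                        # bare class name, no arguments
  #     ['Resize', {'size': (32, 32)}],    # [name, {args}] form
  #   ])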
def _setPostFilters(self, postFilters):
"""
Change one or more postFilters, and recompute the ones that changed.
postFilters -- List of filters, where each filter is a list
[classname, parameters] (or just a string with the class name, if the
filter needs no parameters).
Filters should be located in the regions/ImageSensorFilters directory.
"""
if postFilters:
if not isinstance(postFilters, list):
raise TypeError("'postFilters' must be a list of one or more filters")
if isinstance(postFilters, list) and len(postFilters) == 2 \
and isinstance(postFilters[1], dict):
raise TypeError("'postFilters' must be a _list_ of filters. If you "
"are specifying a filter with the [name, {args}] "
"syntax, nest it within a list: [[name, {args}]]")
postFilters = copy.deepcopy(postFilters)
if postFilters is None:
postFilters = []
elif type(postFilters) is tuple:
postFilters = list(postFilters)
for i, filter in enumerate(postFilters):
if type(filter) is str:
postFilters[i] = [filter, {}]
elif type(filter) is tuple:
postFilters[i] = list(postFilters[i])
if len(postFilters[i]) == 1:
postFilters[i].append({})
self.postFilters = postFilters
self._importFilters(self.postFilters)
def _getNumFilterOutputs(self, filters):
"""
Return the number of outputs for each filter.
Ignores simultaneous outputs.
"""
numFilterOutputs = []
for f in filters:
n = f[2].getOutputCount()
if type(n) in (tuple, list):
numFilterOutputs.append(n[0])
elif type(n) is int:
numFilterOutputs.append(n)
else:
raise RuntimeError("%s filter must return an int or a " % f[0]
+ "list/tuple of two ints from getOutputCount()")
return numFilterOutputs
def _importFilters(self, filters):
"""
Import and instantiate all the specified filters.
This method lives on its own so that it can be used by both _setFilters
and _setPostFilters.
"""
for i in xrange(len(filters)):
# Import the filter
# If name is just the class name, such as 'PadToFit', we assume the same
# name for the module: names = ['PadToFit', 'PadToFit']
# If name is of the form 'ModuleName.ClassName' (useful to try multiple
# versions of the same filter): names = ['ModuleName', 'ClassName']
# By default, ImageSensor searches for filters in
# nupic.regions.ImageSensorFilters. If the import fails, it tries the
# import unmodified - so you may use filters that are located anywhere
# that Python knows about.
if not '.' in filters[i][0]:
moduleName = className = filters[i][0]
else:
components = filters[i][0].split('.')
moduleName = '.'.join(components[:-1])
className = components[-1]
try:
# Search in ImageSensorFilters first
filterModule = __import__('nupic.regions.ImageSensorFilters.%s'
% moduleName, {}, {}, className)
      except ImportError:
try:
filterModule = __import__(moduleName, {}, {}, className)
        except ImportError:
raise RuntimeError('Could not find filter "%s"' % filters[i][0])
filterClass = getattr(filterModule, className)
# Instantiate the filter
filters[i].append(filterClass(**copy.deepcopy(filters[i][1])))
filters[i][2].update(mode=self.mode, background=self.background)
def _setExplorer(self, explorer):
"""
Set the explorer (algorithm used to explore the input space).
explorer -- List containing the explorer name and parameters.
"""
if explorer is None:
raise RuntimeError("Must specify explorer (try 'Flash' for no sweeping)")
explorer = copy.deepcopy(explorer)
if type(explorer) is str:
explorer = [explorer, {}]
elif type(explorer) is tuple:
explorer = list(explorer)
if len(explorer) == 1:
explorer.append({})
# Import the explorer
# If name is just the class name, such as 'Flash', we assume the same
# name for the module: names = ['Flash', 'Flash']
# If name is of the form 'ModuleName.ClassName' (useful to try multiple
# versions of the same explorer): names = ['ModuleName', 'ClassName']
# By default, ImageSensor searches for explorers in
# nupic.regions.ImageSensorExplorers. If the import fails, it tries the
# import unmodified - so you may use explorers that are located anywhere
# that Python knows about.
if not '.' in explorer[0]:
moduleName = className = explorer[0]
else:
components = explorer[0].split('.')
moduleName = '.'.join(components[:-1])
className = components[-1]
try:
# Search in ImageSensorExplorers first
explorerModule = __import__('nupic.regions.ImageSensorExplorers.%s'
% moduleName, {}, {}, className)
except ImportError:
try:
explorerModule = __import__(moduleName, {}, {}, className)
except ImportError:
raise RuntimeError('Could not find explorer "%s"' % explorer[0])
explorerClass = getattr(explorerModule, className)
explorerArgs = copy.deepcopy(explorer[1])
# Append the image accessor methods to the arguments
explorerArgs.update({
'getOriginalImage': self._getOriginalImage,
'getFilteredImages': self._getFilteredImages,
'getImageInfo': self._getImageInfo
})
# Instantiate the explorer
self.explorer = explorer
self.explorer.append(explorerClass(**explorerArgs))
self.explorer[2].update(numImages=len(self._imageList),
numFilters=len(self.filters),
numFilterOutputs=self._getNumFilterOutputs(self.filters),
enabledWidth=self.enabledWidth, enabledHeight=self.enabledHeight,
blankWithReset=self.blankWithReset)
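  # A minimal usage sketch for the explorer syntax accepted above. The
  # explorer argument dictionary is an illustrative placeholder:
  #
  #   sensor.setParameter('explorer', -1, 'Flash')       # no sweeping
  #   sensor.setParameter('explorer', -1,
  #                       ['ExhaustiveSweep', {'sweepOffObject': False}])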
def _meetMemoryLimit(self):
"""
Unload images as necessary to stay within the memory limit.
"""
if self.memoryLimit < 0:
return
while self._pixelCount * 4 / 1000000.0 > self.memoryLimit:
if len(self._filterQueue) > 1:
# Unload the filtered image used least recently
imageIndex, filterPosition = self._filterQueue.pop()
filtered = self._imageList[imageIndex]['filtered'][filterPosition]
for i in xrange(len(filtered)):
self._pixelCount -= filtered[i].size[0] * filtered[i].size[1]
self._imageList[imageIndex]['filtered'].pop(filterPosition)
elif self._imageQueue:
if len(self._imageQueue) == 1 and not self.filters:
# No filters and this is the current image - don't unload it
break
# Unload the original image used least recently
imageIndex = self._imageQueue.pop()
size = self._imageList[imageIndex]['image'].size
self._pixelCount -= size[0] * size[1]
self._imageList[imageIndex]['image'] = None
else:
break
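  # Worked example of the memory estimate used above: every stored pixel is
  # counted as 4 bytes, so the working set is pixelCount * 4 / 1e6 MB. A
  # single 1000x1000 grayscale+alpha image therefore contributes
  #   1000 * 1000 * 4 / 1000000.0 = 4.0 MB
  # toward memoryLimit before any unloading is triggered.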
def _updatePrevPosition(self):
"""
Deep copy position to self.prevPosition.
"""
position = self.explorer[2].position
self.prevPosition = {
'image': position['image'],
'filters': copy.copy(position['filters']),
'offset': copy.copy(position['offset']),
'reset': position['reset']
}
def compute(self, inputs=None, outputs=None):
"""
Generate the next sensor output and send it out.
This method is called by the runtime engine.
"""
if len(self._imageList) == 0:
raise RuntimeError("ImageSensor can't run compute: no images loaded")
    # Check whether the new image belongs to a new sequence; if so, force a reset
prevPosition = self.prevPosition
if prevPosition is not None:
prevSequenceID = self._imageList[prevPosition['image']]['sequenceIndex']
else:
prevSequenceID = None
self._updatePrevPosition()
newPosition = self.prevPosition
if newPosition is not None:
newSequenceID = self._imageList[newPosition['image']]['sequenceIndex']
else:
newSequenceID = None
if newSequenceID != prevSequenceID:
self.prevPosition['reset'] = True
# Get the image(s) to send out
outputImages, final_output = self._getOutputImages()
# Compile information about this iteration and log it
imageInfo = self._getImageInfo()
if imageInfo['imagePath'] is None:
filename = ""
else:
filename = os.path.split(imageInfo['imagePath'])[1]
category = imageInfo['categoryIndex']
if category == -1:
categoryName = ""
else:
categoryName = self.categoryInfo[category][0]
self._logCommand([
('iteration', self._iteration),
('position', self.explorer[2].position),
('filename', filename),
('categoryIndex', category),
('categoryName', categoryName),
('erode', imageInfo['erode']),
('blank', bool(self.prevPosition['reset'] and self.blankWithReset))
], None)
# If we don't have a partition ID at this point (e.g., because
# of memory limits), then we need to try and pull from the
# just-loaded image
if imageInfo['partitionID'] is None:
imgPosn = self.explorer[2].position['image']
imageInfo['partitionID'] = self._imageList[imgPosn].get('partitionID')
if self.depth == 1:
self.outputImage = outputImages[0]
else:
self.outputImage = outputImages
# Invalidate the old location image
self.locationImage = None
# Log the images and locations if specified
if self.logOutputImages:
self._logOutputImages()
if self.logOriginalImages:
self._logOriginalImage()
if self.logLocationImages:
self._logLocationImage()
holdFor = self.explorer[2].holdFor
self._holdForOffset += 1
if self._holdForOffset >= holdFor:
self._holdForOffset = 0
self.explorer[2].next()
self._iteration += 1
# Save category to file
self._writeCategoryToFile(category)
if outputs:
# Convert the output images to a numpy vector
croppedArrays = [numpy.asarray(image.split()[0], RealNumpyDType)
for image in outputImages]
      # Pad the images to fit the full output size if necessary, generating
      # a stack of images, each of size self.width x self.height
pad = self._cubeOutputs and \
(self.depth > 1 or \
croppedArrays[0].shape != (self.height, self.width))
if pad:
fullArrays = [numpy.zeros((self.height, self.width), RealNumpyDType)
for i in xrange(self.depth)]
for i in xrange(self.depth):
fullArrays[i][:croppedArrays[i].shape[0],:croppedArrays[i].shape[1]] \
= croppedArrays[i]
else:
fullArrays = croppedArrays
# Flatten and concatenate the arrays
outputArray = numpy.concatenate([a.flat for a in fullArrays])
# Send black and white images as (0, 1) instead of (0, 255)
if self.mode == 'bw':
outputArray /= 255
# dataOut - main output
if final_output is None:
outputs['dataOut'][:] = outputArray
else:
outputs['dataOut'][:] = final_output
# categoryOut - category index
outputs['categoryOut'][:] = \
numpy.array([float(category)], RealNumpyDType)
# auxDataOut - auxiliary data
auxDataOut = imageInfo['auxData']
if auxDataOut is not None:
outputs['auxDataOut'][:] = auxDataOut
# resetOut - reset flag
if 'resetOut' in outputs:
outputs['resetOut'][:] = \
numpy.array([float(self.prevPosition['reset'])],RealNumpyDType)
# bboxOut - bounding box
if 'bboxOut' in outputs and len(outputs['bboxOut']) == 4:
bbox = outputImages[0].split()[1].getbbox()
if bbox is None:
bbox = (0, 0, 0, 0)
outputs['bboxOut'][:] = numpy.array(bbox, RealNumpyDType)
# Optionally log the bounding box information
if self.logBoundingBox:
self._logBoundingBox(bbox)
# alphaOut - alpha channel
if 'alphaOut' in outputs and len(outputs['alphaOut']) > 1:
alphaOut = \
numpy.asarray(outputImages[0].split()[1], RealNumpyDType).flatten()
if not imageInfo['erode']:
# Change the 0th element of the output to signal that the alpha
# channel should be dilated, not eroded
alphaOut[0] = -alphaOut[0] - 1
outputs['alphaOut'][:alphaOut.shape[0]] = alphaOut
# partitionOut - partition ID (defaults to zero)
if 'partitionOut' in outputs:
partition = imageInfo.get('partitionID')
if partition is None:
partition = 0
outputs['partitionOut'][:] = \
numpy.array([float(partition)], RealNumpyDType)
def getParameter(self, parameterName, index=-1):
"""Get the value of an ImageSensor parameter."""
if parameterName == 'filters':
# Remove filter objects
return [filter[:2] for filter in self.filters]
elif parameterName == 'postFilters':
# Remove filter objects
return [filter[:2] for filter in self.postFilters]
elif parameterName == 'explorer':
# Remove explorer object
return self.explorer[:2]
elif parameterName == 'numImages':
return len(self._imageList)
elif parameterName == 'numMasks':
return len([True for x in self._imageList if x['maskPath']])
elif parameterName in ('numIterations', 'maxOutputVectorCount'):
return self.getNumIterations()
elif parameterName == 'activeOutputCount':
return self.width * self.height * self.depth
elif parameterName == 'position':
return self.explorer[2].position
elif parameterName == 'imageInfo':
return [self._getImageInfo(i) for i in xrange(len(self._imageList))]
elif parameterName == 'prevImageInfo':
if self.prevPosition and self._imageList:
return self._getImageInfo(self.prevPosition['image'])
else:
return None
elif parameterName == 'nextImageInfo':
if self.explorer[2].position and self._imageList:
return self._getImageInfo()
else:
return None
elif parameterName == 'categoryInfo':
return serializeCategoryInfo(self.categoryInfo)
elif parameterName == 'outputImage':
if self._iteration == 0:
return
if self.depth == 1:
return serializeImage(self.outputImage.split()[0])
else:
return [serializeImage(image.split()[0]) for image in self.outputImage]
elif parameterName == 'outputImageWithAlpha':
if self._iteration == 0:
return
if self.depth == 1:
return serializeImage(self.outputImage)
else:
return [serializeImage(image) for image in self.outputImage]
elif parameterName == 'originalImage':
if not self._imageList or self._iteration == 0:
return
return serializeImage(
self._getOriginalImage(self.prevPosition['image']).split()[0])
elif parameterName == 'locationImage':
if not self._imageList or self._iteration == 0 or not self.prevPosition:
return
if not self.locationImage:
self.locationImage = self._createLocationImage()
return serializeImage(self.locationImage)
elif parameterName == 'background':
if self.mode == 'bw':
return self.background / 255
else:
return self.background
elif parameterName =='auxData':
auxData = [numpy.array(imageList['auxData']) for imageList in self._imageList]
return auxData
elif parameterName == 'sequenceCount':
return self.getSequenceCount()
elif parameterName == 'metadata':
metadata = dict()
# Compute the position relative to center
imageIdx = self.prevPosition['image']
image = self._getOriginalImage(imageIdx)
centerX = (image.size[0] - self.enabledWidth) / 2
centerY = (image.size[1] - self.enabledHeight) / 2
(posX, posY) = self.prevPosition['offset']
metadata['posnY'] = centerY - posY
metadata['posnX'] = centerX - posX
metadata['catIndex'] = self._getImageInfo(imageIdx)['categoryIndex']
metadata['catName'] = self.categoryInfo[metadata['catIndex']][0]
return str(metadata)
else:
return PyRegion.getParameter(self, parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""Set the value of an ImageSensor parameter."""
if parameterName == 'filters':
self._setFilters(parameterValue)
elif parameterName == 'postFilters':
self._setPostFilters(parameterValue)
elif parameterName == 'explorer':
self._setExplorer(parameterValue)
elif parameterName == 'enabledWidth':
self.enabledWidth = parameterValue
self.explorer[2].update(enabledWidth=parameterValue)
elif parameterName == 'enabledHeight':
self.enabledHeight = parameterValue
self.explorer[2].update(enabledHeight=parameterValue)
elif parameterName == 'width':
self.width = parameterValue
elif parameterName == 'height':
self.height = parameterValue
elif parameterName == 'blankWithReset':
self.blankWithReset = parameterValue
self.explorer[2].update(blankWithReset=parameterValue)
elif parameterName == 'categoryOutputFile':
if self._categoryOutputFile:
self._categoryOutputFile.close()
self._categoryOutputFile = None
self.categoryOutputFile = parameterValue
elif parameterName == 'categoryInfo':
self.categoryInfo = deserializeCategoryInfo(parameterValue)
# TODO change the names and indices of the loaded image?
elif parameterName == 'background':
self.background = parameterValue
if self.mode == 'bw':
self.background *= 255
for filter in self.filters + self.postFilters:
filter[2].update(background=self.background)
elif parameterName == 'logDir':
if self.logFile is not None and self.logDir != parameterValue:
self.logFile.close()
self.logFile = None
if self.bboxLogFile is not None and self.logDir != parameterValue:
self.bboxLogFile.close()
self.bboxLogFile = None
self.logDir = parameterValue
elif parameterName == 'logText':
self.logText = parameterValue
if self.logFile is not None and not self.logText:
self.logFile.close()
self.logFile = None
elif parameterName == 'memoryLimit':
self.memoryLimit = parameterValue
self._meetMemoryLimit()
else:
if not hasattr(self, parameterName):
raise Exception("%s is not a valid parameter of the ImageSensor" \
% parameterName)
setattr(self, parameterName, parameterValue)
def __getstate__(self):
"""Get serializable state."""
# Serialize images stored in categoryInfo
serializedCategoryInfo = [(name, b64encode(imageStr)) for name, imageStr
in self.getParameter('categoryInfo')]
# Get the object-less filters and explorer
resetFilters = self.getParameter('filters')
resetPostFilters = self.getParameter('postFilters')
resetExplorer = self.getParameter('explorer')
# Compile a dictionary of attributes to save
state = dict()
for name in ['width', 'height', 'depth', 'mode',
'blankWithReset', 'enabledWidth', 'enabledHeight', 'invertOutput',
'background', 'automaskingTolerance', 'automaskingPadding',
'memoryLimit', 'minimalBoundingBox', '_cubeOutputs', '_auxDataWidth']:
state[name] = getattr(self, name)
# Add attributes that have been manipulated
state.update({'serializedCategoryInfo': serializedCategoryInfo,
'resetExplorer': resetExplorer, 'resetFilters': resetFilters,
'resetPostFilters': resetPostFilters})
# Save a version number
state['version'] = 1.7
return state
def __setstate__(self, state):
"""Set state from serialized state."""
# Register a global variable for scanning or other tomfoolery
#PyNodeModule.nodes = getattr(PyNodeModule, 'nodes', []) + [self]
if type(state) is tuple:
raise RuntimeError("Cannot convert legacy ImageSensor state")
# Get the version number
version = state.pop('version')
# Get attributes that need to be manipulated
serializedCategoryInfo = state.pop('serializedCategoryInfo')
resetFilters = state.pop('resetFilters')
resetPostFilters = state.pop('resetPostFilters')
resetExplorer = state.pop('resetExplorer')
for name in state:
setattr(self, name, state[name])
    # Deserialize images stored in categoryInfo (setParameter expects
    # serialized images without the base64 layer)
if version >= 1.64:
# Undo base64 encoding
serializedCategoryInfo = [(name, b64decode(imageStr)) for name, imageStr
in serializedCategoryInfo]
self.setParameter('categoryInfo', -1, serializedCategoryInfo)
# Set variables that weren't saved
self._imageList = []
self._imageQueue = []
self._filterQueue = []
self._pixelCount = 0
self._iteration = 0
self.logFile = None
self.bboxLogFile = None
self.logText = False
self.logOutputImages = False
self.logOriginalImages = False
self.logFilteredImages = False
self.logLocationImages = False
self.logLocationOnOriginalImage = False
self.logBoundingBox = False
self.logDir = "imagesensor_log"
self.categoryOutputFile = None
self._categoryOutputFile = None
self.outputImage = None
self.locationImage = None
self.prevPosition = None
# Set up the filters and explorer
self.explorer = None
self._setFilters(resetFilters)
self._setPostFilters(resetPostFilters)
self._setExplorer(resetExplorer)
self._cubeOutputs = not containsConvolutionPostFilter(resetPostFilters)
# Backward compatibility
if version < 1.63:
if not hasattr(self, 'automaskingTolerance'):
self.automaskingTolerance = 0
if not hasattr(self, 'automaskingPadding'):
self.automaskingPadding = 0
if not hasattr(self, '_holdForOffset'):
self._holdForOffset = 0
if not hasattr(self, '_auxDataWidth'):
self._auxDataWidth = 0
if version < 1.65:
# Set to True, the old behavior, though it is set to False by default
# in new networks
self.minimalBoundingBox = True
@classmethod
def getSpec(cls):
"""Return the Spec for this Region."""
ns = dict(
description=ImageSensor.__doc__,
singleNodeOnly=False,
inputs = {},
outputs = dict(
dataOut=dict(
description="""Pixels of the image.""",
dataType='Real32',
count=1,
regionLevel=False,
isDefaultOutput=True),
categoryOut=dict(
description="""Index of the current image's category.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
resetOut=dict(
description="""Boolean reset output.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
bboxOut=dict(
description="""Bounding box output (4-tuple).""",
dataType='Real32',
count=4,
regionLevel=True,
isDefaultOutput=False),
alphaOut=dict(
description="""Alpha channel output.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
partitionOut=dict(
description="""Index of the leave-one-out partition associated with the current image.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
auxDataOut=dict(
description="""Auxiliary data sent directly to the classifier.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
),
parameters = dict(
outputImageWithAlpha=dict(
description="""Serialized version of the current output image(s) with the alpha channel.
If depth > 1, multiple serialized images will be returned in a list. To deserialize:
from nupic.image import deserializeImage
outputImage = deserializeImage(sensor.getParameter('outputImageWithAlpha'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
originalImage=dict(
description="""Serialized version of the original, unfiltered version of the
current image. To deserialize:
from nupic.image import deserializeImage
originalImage = deserializeImage(sensor.getParameter('originalImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
locationImage=dict(
description="""Serialized version of the current 'location image', which shows the
position of the sensor overlaid on the filtered image (optionally, the
original image). To deserialize:
from nupic.image import deserializeImage
locationImage = deserializeImage(sensor.getParameter('locationImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
height=dict(
description="""Height of the image, in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
automaskingPadding=dict(
description="""Affects the process by which bounding box masks
are automatically generated from images. After computing the
bounding box based on image similarity with respect to the background,
          the box will be expanded by 'automaskingPadding' pixels in all four
directions (constrained by the original size of the image.)""",
dataType='UInt32',
count=1,
constraints='interval: [0, ...]',
accessMode='ReadWrite'
),
numMasks=dict(
description="""Number of masks that the sensor has loaded.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
filters=dict(
description="""List of filters to apply to each image. Each element in the
list should be either a string (just the filter name) or a list containing
both the filter name and a dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
logOutputImages=dict(
description="""Toggle for writing each output to disk (as an image)
on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
prevPosition=dict(
description="""The position of the sensor from the *previous* compute, as a
dictionary. Because "outputImage" and "locationImage" match the output of the
previous compute (not the upcoming one), they do not correlate with the
"position" parameter; use this parameter instead.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
minimalBoundingBox=dict(
description="""Whether the bounding box found by looking at the
image background should be set even if it touches one of the sides of
the image. Set to False to avoid chopping edges off certain images, or
True if that is not an issue and you wish to use a sweeping explorer.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
numImages=dict(
description="""Number of images that the sensor has loaded.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
logLocationOnOriginalImage=dict(
description="""Whether to overlay the location rectangle on the original image instead
of the filtered image. Does not work if the two images do not have the
same size, and may be nonsensical even if they do (for example, if a filter
moved the object within the image).""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
outputImage=dict(
description="""Serialized version of the current output image(s). If depth > 1,
multiple serialized images will be returned in a list. To deserialize:
from nupic.image import deserializeImage
outputImage = deserializeImage(sensor.getParameter('outputImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
logFilteredImages=dict(
description="""Toggle for writing the intermediate versions of images to disk
as they pass through the filter chain.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
width=dict(
description="""Width of the image, in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
auxDataWidth=dict(
description="""The number of elements in in the auxiliary data vector.""",
dataType='int',
count=1,
constraints='',
accessMode='ReadWrite'
),
categoryOutputFile=dict(
description="""Name of file to which to write category number on each compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
logLocationImages=dict(
description="""Toggle for writing an image to disk on each iteration which shows
the location of the sensor window.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
nextImageInfo=dict(
description="""Dictionary of information for the image which will be used for the next
compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
enabledWidth=dict(
description="""Width of the enabled 'window', in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
numIterations=dict(
description="""Number of iterations necessary to fully explore all loaded images. Only
some explorers support this. Use the getNumIterations command if you wish to
get the number of iterations for a particular image.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
logText=dict(
description="""Toggle for verbose logging to imagesensor_log.txt.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
explorer=dict(
description="""Explorer (used to move the sensor through the input space).
Specify as a string (just the explorer name) or a list containing both the
explorer name and a dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
imageInfo=dict(
description="""A list with a dictionary of information for each image that has
been loaded.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
useAux=dict(
description="""Use auxiliary input data at the classifier level""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
automaskingTolerance=dict(
description="""Controls the process by which bounding box masks
are automatically generated from images based on similarity to the
specified 'background' pixel value. The bounding box will enclose all
pixels in the image that differ from 'background' by more than
the value specified in 'automaskingTolerance'. Default is 0, which
generates bounding boxes that enclose all pixels that differ at all
from the background. In general, increasing the value of
'automaskingTolerance' will produce tighter (smaller) bounding box masks.""",
dataType='UInt32',
count=1,
constraints='interval: [0, 255]',
accessMode='ReadWrite'
),
activeOutputCount=dict(
description="""The number of active elements in the dataOut output.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
memoryLimit=dict(
description="""Maximum amount of memory that ImageSensor should use for storing images,
in megabytes. ImageSensor will unload images and filter outputs to stay beneath
this ceiling. Set to -1 for no limit.""",
dataType='int',
count=1,
constraints='interval: [-1, ...]',
accessMode='ReadWrite'
),
logDir=dict(
description="""Name of the imagesensor log directory, which is created in the session
bundle if any logging options are enabled. Default is imagesensor_log.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
background=dict(
description="""Value of "background" pixels. May be used to pad images during sweeping,
as well as to find the bounds of an object if no mask is available.""",
dataType='UInt32',
count=1,
constraints='interval: [0, 255]',
accessMode='ReadWrite'
),
position=dict(
description="""The position of the sensor that will be used for the *next* compute,
as a dictionary.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
auxData=dict(
description="""List of Auxiliary Data for every image in the image list""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
invertOutput=dict(
description="""Whether to invert the pixel values before sending an image to the
network. If invertOutput is enabled, a white object on a black background
becomes a black object on a white background.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
categoryInfo=dict(
description="""A list with a tuple for each category that the sensor has learned. The
tuple contains the category name (i.e. 'dog') and a serialized version of
an example image for the category. To deserialize:
from nupic.regions.ImageSensor import deserializeCategoryInfo
categoryInfo = deserializeCategoryInfo(sensor.getParameter('categoryInfo'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
prevImageInfo=dict(
description="""Dictionary of information for the image used during the previous compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
logOriginalImages=dict(
description="""Toggle for writing the original, unfiltered version of the current
image to disk on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
enabledHeight=dict(
description="""Height of the enabled 'window', in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
depth=dict(
description="""Number of images to send out simultaneously.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='Read'
),
mode=dict(
description="""'gray' (8-bit grayscale) or 'bw' (1-bit black and white).""",
dataType='Byte',
count=0,
constraints='enum: gray, bw',
accessMode='Read'
),
logBoundingBox=dict(
description="""Toggle for logging the bounding box information on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
blankWithReset=dict(
description="""** DEPRECATED ** Whether to send a blank output every time the explorer
generates a reset signal (such as when beginning a new sweep). Turning
on blanks increases the number of iterations.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
metadata=dict(
description="""Parameter that contains a dict of metadata for the most
recently generated output image.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
postFilters=dict(
description="""List of filters to apply to each image just before the image
is sent to the network. Each element in the list should either be a string
(just the filter name) or a list containing both the filter name and a
dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
maxOutputVectorCount=dict(
description="""(alias for numIterations) Number of iterations necessary to fully explore
all loaded images. Only some explorers support this. Use the getNumIterations command
if you wish to get the number of iterations for a particular image.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
)
),
commands=dict(
loadSingleImage=dict(description='load a single image'),
loadMultipleImages=dict(description='load multiple images'),
)
)
return ns
#def getSpec(self):
# """Return the NodeSpec for this PyNode."""
#
# parent = PyNode.getSpec(self)
# out = NodeSpec(
# description=ImageSensor.__doc__,
# singleNodeOnly=False,
# inputs = [],
# outputs = [
# NodeSpecItem(name="dataOut", type=RealTypeName, elementCount=0,
# isDefaultOutput2=True,
# description="""Pixels of the image."""),
# NodeSpecItem(name="categoryOut", type=RealTypeName, regionLevel2=True,
# description="""Index of the current image's category."""),
# NodeSpecItem(name="resetOut", type=RealTypeName, regionLevel2=True,
# description="""Boolean reset output."""),
# NodeSpecItem(name="bboxOut", type=RealTypeName, regionLevel2=True,
# elementCount=4,
# description="""Bounding box output (4-tuple)."""),
# NodeSpecItem(name="alphaOut", type=RealTypeName,
# elementCount=0,
# description="""Alpha channel output."""),
# NodeSpecItem(name="partitionOut", type=RealTypeName, regionLevel2=True,
# description="""Index of the leave-one-out partition associated with the current image."""),
# NodeSpecItem(name="auxDataOut", type=RealTypeName, elementCount=0,
# regionLevel2=True,
# description="""Auxiliary data sent directly to the classifier.""")
# ],
# parameters = [
# NodeSpecItem(name="useAux", type="bool", constraints="bool", access="cgs",
# value=False,
# description="Use auxiliary input data at the classifier level"),
# NodeSpecItem(name="width", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Width of the image, in pixels."""),
# NodeSpecItem(name="height", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Height of the image, in pixels."""),
# NodeSpecItem(name="depth", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Number of images to send out simultaneously."""),
# NodeSpecItem(name="mode", type="string", access="cg",
# constraints="enum: gray, bw", value='gray',
# description="""'gray' (8-bit grayscale) or 'bw' (1-bit black and white)."""),
# NodeSpecItem(name="enabledWidth", type="uint", access="gs",
# constraints="interval: [1, ...]",
# description="""Width of the enabled 'window', in pixels."""),
# NodeSpecItem(name="enabledHeight", type="uint", access="gs",
# constraints="interval: [1, ...]",
# description="""Height of the enabled 'window', in pixels."""),
# NodeSpecItem(name="activeOutputCount", type="uint", access="g",
# description="""The number of active elements in the dataOut output."""),
# NodeSpecItem(name="background", type="uint", access="cgs",
# constraints="interval: [0, 255]", value=0,
# description="""Value of "background" pixels. May be used to pad images during sweeping,
# as well as to find the bounds of an object if no mask is available."""),
# NodeSpecItem(name="automaskingTolerance", type="uint", access="cgs",
# constraints="interval: [0, 255]", value=0,
# description="""Controls the process by which bounding box masks
# are automatically generated from images based on similarity to the
# specified 'background' pixel value. The bounding box will enclose all
# pixels in the image that differ from 'background' by more than
# the value specified in 'automaskingTolerance'. Default is 0, which
# generates bounding boxes that enclose all pixels that differ at all
# from the background. In general, increasing the value of
# 'automaskingTolerance' will produce tighter (smaller) bounding box masks."""),
# NodeSpecItem(name="automaskingPadding", type="uint", access="cgs",
# constraints="interval: [0, ...]", value=0,
# description="""Affects the process by which bounding box masks
# are automatically generated from images. After computing the
# bounding box based on image similarity with respect to the background,
# the box will be expanded by 'automaskPadding' pixels in all four
# directions (constrained by the original size of the image.)"""),
# NodeSpecItem(name="invertOutput", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Whether to invert the pixel values before sending an image to the
# network. If invertOutput is enabled, a white object on a black background
# becomes a black object on a white background."""),
# NodeSpecItem(name="filters", type="PyObject", access="cgs",
# value=[],
# description="""List of filters to apply to each image. Each element in the
# list should be either a string (just the filter name) or a list containing
# both the filter name and a dictionary specifying its arguments."""),
# NodeSpecItem(name="postFilters", type="PyObject", access="cgs",
# value=[],
# description="""List of filters to apply to each image just before the image
# is sent to the network. Each element in the list should either be a string
# (just the filter name) or a list containing both the filter name and a
# dictionary specifying its arguments."""),
# NodeSpecItem(name="explorer", type="PyObject", access="cgs",
# value="Flash",
# description="""Explorer (used to move the sensor through the input space).
# Specify as a string (just the explorer name) or a list containing both the
# explorer name and a dictionary specifying its arguments."""),
# NodeSpecItem(name="categoryOutputFile", type="string", access="cgs",
# value="",
# description="""Name of file to which to write category number on each compute."""),
# NodeSpecItem(name="logText", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for verbose logging to imagesensor_log.txt."""),
# NodeSpecItem(name="logOutputImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing each output to disk (as an image)
# on each iteration."""),
# NodeSpecItem(name="logOriginalImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing the original, unfiltered version of the current
# image to disk on each iteration."""),
# NodeSpecItem(name="logFilteredImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing the intermediate versions of images to disk
# as they pass through the filter chain."""),
# NodeSpecItem(name="logLocationImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing an image to disk on each iteration which shows
# the location of the sensor window."""),
# NodeSpecItem(name="logLocationOnOriginalImage", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Whether to overlay the location rectangle on the original image instead
# of the filtered image. Does not work if the two images do not have the
# same size, and may be nonsensical even if they do (for example, if a filter
# moved the object within the image)."""),
# NodeSpecItem(name="logBoundingBox", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for logging the bounding box information on each iteration."""),
# NodeSpecItem(name="logDir", type="string", access="cgs",
# value="imagesensor_log",
# description="""Name of the imagesensor log directory, which is created in the session
# bundle if any logging options are enabled. Default is imagesensor_log."""),
# NodeSpecItem(name="memoryLimit", type="int", access="cgs",
# constraints="interval: [-1, ...]", value=100,
# description="""Maximum amount of memory that ImageSensor should use for storing images,
# in megabytes. ImageSensor will unload images and filter outputs to stay beneath
# this ceiling. Set to -1 for no limit."""),
# NodeSpecItem(name="numImages", type="uint", access="g",
# description="""Number of images that the sensor has loaded."""),
# NodeSpecItem(name="numMasks", type="uint", access="g",
# description="""Number of masks that the sensor has loaded."""),
# NodeSpecItem(name="numIterations", type="uint", access="g",
# description="""Number of iterations necessary to fully explore all loaded images. Only
# some explorers support this. Use the getNumIterations command if you wish to
# get the number of iterations for a particular image."""),
# NodeSpecItem(name="maxOutputVectorCount", type="uint", access="g",
# description="""(alias for numIterations) Number of iterations necessary to fully explore
# all loaded images. Only some explorers support this. Use the getNumIterations command
# if you wish to get the number of iterations for a particular image."""),
# NodeSpecItem(name="blankWithReset", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""** DEPRECATED ** Whether to send a blank output every time the explorer
# generates a reset signal (such as when beginning a new sweep). Turning
# on blanks increases the number of iterations."""),
# NodeSpecItem(name="position", type="PyObject", access="g",
# description="""The position of the sensor that will be used for the *next* compute,
# as a dictionary."""),
# NodeSpecItem(name="prevPosition", type="PyObject", access="g",
# description="""The position of the sensor from the *previous* compute, as a
# dictionary. Because "outputImage" and "locationImage" match the output of the
# previous compute (not the upcoming one), they do not correlate with the
# "position" parameter; use this parameter instead."""),
# NodeSpecItem(name="imageInfo", type="PyObject", access="g",
# description="""A list with a dictionary of information for each image that has
# been loaded."""),
# NodeSpecItem(name="prevImageInfo", type="PyObject", access="g",
# description="""Dictionary of information for the image used during the previous compute."""),
# NodeSpecItem(name="nextImageInfo", type="PyObject", access="g",
# description="""Dictionary of information for the image which will be used for the next
# compute."""),
# NodeSpecItem(name="categoryInfo", type="PyObject", access="gs",
# description="""A list with a tuple for each category that the sensor has learned. The
# tuple contains the category name (i.e. 'dog') and a serialized version of
# an example image for the category. To deserialize:
# from nupic.regions.ImageSensor import deserializeCategoryInfo
# categoryInfo = deserializeCategoryInfo(sensor.getParameter('categoryInfo'))"""),
# NodeSpecItem(name="outputImage", type="PyObject", access="g",
# description="""Serialized version of the current output image(s). If depth > 1,
# multiple serialized images will be returned in a list. To deserialize:
# from nupic.image import deserializeImage
# outputImage = deserializeImage(sensor.getParameter('outputImage'))"""),
# NodeSpecItem(name="outputImageWithAlpha", type="PyObject", access="g",
# description="""Serialized version of the current output image(s) with the alpha channel.
# If depth > 1, multiple serialized images will be returned in a list. To deserialize:
# from nupic.image import deserializeImage
# outputImage = deserializeImage(sensor.getParameter('outputImageWithAlpha'))"""),
# NodeSpecItem(name="originalImage", type="string", access="g",
# description="""Serialized version of the original, unfiltered version of the
# current image. To deserialize:
# from nupic.image import deserializeImage
# originalImage = deserializeImage(sensor.getParameter('originalImage'))"""),
# NodeSpecItem(name="locationImage", type="string", access="g",
# description="""Serialized version of the current 'location image', which shows the
# position of the sensor overlaid on the filtered image (optionally, the
# original image). To deserialize:
# from nupic.image import deserializeImage
# locationImage = deserializeImage(sensor.getParameter('locationImage'))"""),
# NodeSpecItem(name="minimalBoundingBox", type="bool", constraints="bool", access="cgs",
# description="""Whether the bounding box found by looking at the
# image background should be set even if it touches one of the sides of
# the image. Set to False to avoid chopping edges off certain images, or
# True if that is not an issue and you wish to use a sweeping explorer."""),
# NodeSpecItem(name="auxDataWidth", type="int", access="cgs",
# description="""The number of elements in in the auxiliary data vector."""),
# NodeSpecItem(name="auxData", type="PyObject", access="g",
# description="""List of Auxiliary Data for every image in the image list"""),
# NodeSpecItem(name="metadata", type="string", access="g",
# description="""Parameter that contains a dict of metadata for the most
# recently generated output image."""),
# ]
# )
# return out + parent
#---------------------------------------------------------------------------------
def initialize(self, dims, splitterMaps):
pass
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
if name == 'auxDataOut':
return self._auxDataWidth if self._auxDataWidth else 0
elif name == 'dataOut':
return 1
elif name == 'alphaOut':
return 1
else:
raise Exception('Unknown output: ' + name)
#def interpret2(self, command):
# """NuPIC 2 replacement for interpret in NuPIC 1 nodes"""
# # This process effectively strips out one level of quotes; manifests
# # as a problem with pathnames on windows
# exec(command.replace("\\", "\\\\"))
def serializeCategoryInfo(categoryInfo):
return [[name, serializeImage(image)] for name, image in categoryInfo]
def deserializeCategoryInfo(sCategoryInfo):
if sCategoryInfo is None: return []
return [[name, (deserializeImage(sImage) if sImage is not None else None)]
for name, sImage in sCategoryInfo]
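# A minimal round-trip sketch for the two helpers above, assuming PIL's Image
# and the serializeImage/deserializeImage helpers imported at the top of this
# module (the category name and image size are placeholders):
#
#   catInfo = [('dog', Image.new('LA', (32, 32)))]
#   wire = serializeCategoryInfo(catInfo)       # [['dog', <serialized image>]]
#   restored = deserializeCategoryInfo(wire)    # names paired with PIL images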
def _serializeImageList(imageList):
sImageList = []
for i in xrange(len(imageList)):
sImageList.append(imageList[i].copy())
if sImageList[i]['image']:
sImageList[i]['image'] = serializeImage(sImageList[i]['image'])
if sImageList[i]['filtered']:
sImageList[i]['filtered'] = _serializeAllImages(sImageList[i]['filtered'])
return sImageList
def _deserializeImageList(sImageList):
imageList = sImageList
for i in xrange(len(imageList)):
if imageList[i]['image']:
imageList[i]['image'] = deserializeImage(imageList[i]['image'])
if imageList[i]['filtered']:
imageList[i]['filtered'] = _deserializeAllImages(imageList[i]['filtered'])
return imageList
def _serializeAllImages(old):
new = {}
for key in old:
new[key] = [serializeImage(image) for image in old[key]]
return new
def _deserializeAllImages(old):
new = {}
for key in old:
new[key] = [deserializeImage(sImage) for sImage in old[key]]
return new
| gpl-3.0 |
useabode/redash | redash/query_runner/big_query.py | 4 | 10388 | import datetime
import json
import logging
import sys
import time
from base64 import b64decode
import httplib2
import requests
from redash import settings
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import apiclient.errors
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client.contrib import gce
enabled = True
except ImportError:
enabled = False
types_map = {
'INTEGER': TYPE_INTEGER,
'FLOAT': TYPE_FLOAT,
'BOOLEAN': TYPE_BOOLEAN,
'STRING': TYPE_STRING,
'TIMESTAMP': TYPE_DATETIME,
}
def transform_row(row, fields):
column_index = 0
row_data = {}
for cell in row["f"]:
field = fields[column_index]
cell_value = cell['v']
if cell_value is None:
pass
# Otherwise just cast the value
elif field['type'] == 'INTEGER':
cell_value = int(cell_value)
elif field['type'] == 'FLOAT':
cell_value = float(cell_value)
elif field['type'] == 'BOOLEAN':
cell_value = cell_value.lower() == "true"
elif field['type'] == 'TIMESTAMP':
cell_value = datetime.datetime.fromtimestamp(float(cell_value))
row_data[field["name"]] = cell_value
column_index += 1
return row_data
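# Illustrative sketch (hypothetical field names and values) of what
# transform_row() produces from BigQuery's wire format, where each row is
# {'f': [{'v': ...}, ...]} and the fields come from the reply schema:
#
#   fields = [{'name': 'user_id', 'type': 'INTEGER'},
#             {'name': 'score', 'type': 'FLOAT'},
#             {'name': 'active', 'type': 'BOOLEAN'}]
#   row = {'f': [{'v': '42'}, {'v': '3.5'}, {'v': 'true'}]}
#   transform_row(row, fields)   # -> {'user_id': 42, 'score': 3.5, 'active': True}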
def _load_key(filename):
f = file(filename, "rb")
try:
return f.read()
finally:
f.close()
def _get_query_results(jobs, project_id, job_id, start_index):
query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute()
logging.debug('query_reply %s', query_reply)
if not query_reply['jobComplete']:
time.sleep(10)
return _get_query_results(jobs, project_id, job_id, start_index)
return query_reply
class BigQuery(BaseQueryRunner):
noop_query = "SELECT 1"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'projectId': {
'type': 'string',
'title': 'Project ID'
},
'jsonKeyFile': {
"type": "string",
'title': 'JSON Key File'
},
'totalMBytesProcessedLimit': {
"type": "number",
'title': 'Scanned Data Limit (MB)'
},
'userDefinedFunctionResourceUri': {
"type": "string",
'title': 'UDF Source URIs (i.e. gs://bucket/date_utils.js, gs://bucket/string_utils.js )'
},
'useStandardSql': {
"type": "boolean",
'title': "Use Standard SQL (Beta)",
},
'loadSchema': {
"type": "boolean",
"title": "Load Schema"
},
'maximumBillingTier': {
"type": "number",
"title": "Maximum Billing Tier"
}
},
'required': ['jsonKeyFile', 'projectId'],
"order": ['projectId', 'jsonKeyFile', 'loadSchema', 'useStandardSql', 'totalMBytesProcessedLimit', 'maximumBillingTier', 'userDefinedFunctionResourceUri'],
'secret': ['jsonKeyFile']
}
@classmethod
def annotate_query(cls):
return False
def __init__(self, configuration):
super(BigQuery, self).__init__(configuration)
def _get_bigquery_service(self):
scope = [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/drive"
]
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
http = httplib2.Http(timeout=settings.BIGQUERY_HTTP_TIMEOUT)
http = creds.authorize(http)
return build("bigquery", "v2", http=http)
def _get_project_id(self):
return self.configuration["projectId"]
def _get_total_bytes_processed(self, jobs, query):
job_data = {
"query": query,
"dryRun": True,
}
if self.configuration.get('useStandardSql', False):
job_data['useLegacySql'] = False
response = jobs.query(projectId=self._get_project_id(), body=job_data).execute()
return int(response["totalBytesProcessed"])
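    # Worked sketch of the scanned-data guard that run_query() builds on this
    # dry run (numbers are hypothetical): a dry run reporting
    # totalBytesProcessed = 250000000 gives
    #     250000000 / 1000.0 / 1000.0 == 250.0   # MB
    # so a configured totalMBytesProcessedLimit of 100 makes run_query()
    # refuse the query before submitting it.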
def _get_query_result(self, jobs, query):
project_id = self._get_project_id()
job_data = {
"configuration": {
"query": {
"query": query,
}
}
}
if self.configuration.get('useStandardSql', False):
job_data['configuration']['query']['useLegacySql'] = False
if "userDefinedFunctionResourceUri" in self.configuration:
resource_uris = self.configuration["userDefinedFunctionResourceUri"].split(',')
job_data["configuration"]["query"]["userDefinedFunctionResources"] = map(
lambda resource_uri: {"resourceUri": resource_uri}, resource_uris)
if "maximumBillingTier" in self.configuration:
job_data["configuration"]["query"]["maximumBillingTier"] = self.configuration["maximumBillingTier"]
insert_response = jobs.insert(projectId=project_id, body=job_data).execute()
current_row = 0
query_reply = _get_query_results(jobs, project_id=project_id,
job_id=insert_response['jobReference']['jobId'], start_index=current_row)
logger.debug("bigquery replied: %s", query_reply)
rows = []
while ("rows" in query_reply) and current_row < query_reply['totalRows']:
for row in query_reply["rows"]:
rows.append(transform_row(row, query_reply["schema"]["fields"]))
current_row += len(query_reply['rows'])
query_reply = jobs.getQueryResults(projectId=project_id, jobId=query_reply['jobReference']['jobId'],
startIndex=current_row).execute()
columns = [{'name': f["name"],
'friendly_name': f["name"],
'type': types_map.get(f['type'], "string")} for f in query_reply["schema"]["fields"]]
data = {
"columns": columns,
"rows": rows
}
return data
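    # Shape of the dict returned above, as consumed by run_query(); the column
    # and row values here are hypothetical, and 'type' holds one of the TYPE_*
    # constants mapped through types_map:
    #
    #   {'columns': [{'name': 'user_id', 'friendly_name': 'user_id',
    #                 'type': TYPE_INTEGER}],
    #    'rows': [{'user_id': 42}, {'user_id': 7}]}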
def get_schema(self, get_stats=False):
if not self.configuration.get('loadSchema', False):
return []
service = self._get_bigquery_service()
project_id = self._get_project_id()
datasets = service.datasets().list(projectId=project_id).execute()
schema = []
for dataset in datasets.get('datasets', []):
dataset_id = dataset['datasetReference']['datasetId']
tables = service.tables().list(projectId=project_id, datasetId=dataset_id).execute()
for table in tables.get('tables', []):
table_data = service.tables().get(projectId=project_id, datasetId=dataset_id, tableId=table['tableReference']['tableId']).execute()
schema.append({'name': table_data['id'], 'columns': map(lambda r: r['name'], table_data['schema']['fields'])})
return schema
def run_query(self, query, user):
logger.debug("BigQuery got query: %s", query)
bigquery_service = self._get_bigquery_service()
jobs = bigquery_service.jobs()
try:
if "totalMBytesProcessedLimit" in self.configuration:
limitMB = self.configuration["totalMBytesProcessedLimit"]
processedMB = self._get_total_bytes_processed(jobs, query) / 1000.0 / 1000.0
if limitMB < processedMB:
return None, "Larger than %d MBytes will be processed (%f MBytes)" % (limitMB, processedMB)
data = self._get_query_result(jobs, query)
error = None
json_data = json.dumps(data, cls=JSONEncoder)
except apiclient.errors.HttpError, e:
json_data = None
if e.resp.status == 400:
error = json.loads(e.content)['error']['message']
else:
error = e.content
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except Exception:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
class BigQueryGCE(BigQuery):
@classmethod
def type(cls):
return "bigquery_gce"
@classmethod
def enabled(cls):
try:
# check if we're on a GCE instance
requests.get('http://metadata.google.internal')
except requests.exceptions.ConnectionError:
return False
return True
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'totalMBytesProcessedLimit': {
"type": "number",
'title': 'Total MByte Processed Limit'
},
'userDefinedFunctionResourceUri': {
"type": "string",
'title': 'UDF Source URIs (i.e. gs://bucket/date_utils.js, gs://bucket/string_utils.js )'
},
'useStandardSql': {
"type": "boolean",
'title': "Use Standard SQL (Beta)",
},
'loadSchema': {
"type": "boolean",
"title": "Load Schema"
}
}
}
def _get_project_id(self):
return requests.get('http://metadata/computeMetadata/v1/project/project-id', headers={'Metadata-Flavor': 'Google'}).content
def _get_bigquery_service(self):
credentials = gce.AppAssertionCredentials(scope='https://www.googleapis.com/auth/bigquery')
http = httplib2.Http()
http = credentials.authorize(http)
return build("bigquery", "v2", http=http)
register(BigQuery)
register(BigQueryGCE)
| bsd-2-clause |
anielsen001/scipy | scipy/stats/stats.py | 9 | 179575 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
from collections import namedtuple
import math
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types, xrange
from scipy._lib._version import NumpyVersion
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions, mstats_basic, _stats
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_condis
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
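# Illustrative behaviour of the nan_policy helper above (a private function
# called from within this module by most of the public statistics):
#
#   _contains_nan(np.array([1.0, np.nan]))            # -> (True, 'propagate')
#   _contains_nan(np.array([1.0, np.nan]), 'omit')    # -> (True, 'omit')
#   _contains_nan(np.array([1.0, np.nan]), 'raise')   # raises ValueError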
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
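# Quick illustrative calls (not part of the original module); inputs are
# chosen so the expected results are easy to verify by hand:
#
#   from scipy import stats
#   stats.gmean([1, 4, 16])                  # cube root of 1*4*16 = 64 -> ~4.0
#   stats.gmean([[1, 4], [4, 16]], axis=0)   # per-column geometric means -> ~[2., 8.]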
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
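# Illustrative call (hand-checkable): the harmonic mean of 1, 2 and 4 is
# 3 / (1/1 + 1/2 + 1/4) = 3 / 1.75 = 12/7.
#
#   from scipy import stats
#   stats.hmean([1, 2, 4])   # -> ~1.7142857 (12/7)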
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
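# Worked check of the central-moment definition given in the `moment`
# docstring above (m_k = mean of (x - mean(x))**k); the chosen input makes
# the results exact:
#
#   from scipy import stats
#   stats.moment([1, 2, 3, 4], moment=2)   # mean of squared deviations -> 1.25
#   stats.moment([1, 2, 3, 4], moment=1)   # first central moment -> 0.0 by definition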
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
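# Illustrative call: for [1, 2, 3, 4] the biased standard deviation is
# sqrt(1.25) and the mean is 2.5, so the coefficient of variation is
# sqrt(1.25) / 2.5 ~= 0.4472.
#
#   from scipy import stats
#   stats.variation([1, 2, 3, 4])   # -> ~0.4472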
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
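# Illustrative calls: a symmetric sample has zero skewness, while a sample
# with a long right tail has positive skewness.
#
#   from scipy import stats
#   stats.skew([1, 2, 3, 4, 5])   # symmetric -> 0.0
#   stats.skew([1, 1, 1, 10])     # long right tail -> positive (~1.15)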
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
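# Illustrative calls (hand-checkable): for [1, 2, 3, 4, 5], m2 = 2 and
# m4 = 6.8, so Pearson kurtosis is m4/m2**2 = 1.7 and Fisher kurtosis is
# 1.7 - 3 = -1.3.
#
#   from scipy import stats
#   stats.kurtosis([1, 2, 3, 4, 5])                 # Fisher -> -1.3
#   stats.kurtosis([1, 2, 3, 4, 5], fisher=False)   # Pearson -> 1.7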
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
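# Illustrative usage (random data, so the exact numbers vary): the statistic
# is s**2 + k**2 for the skewtest and kurtosistest z-scores, and a large
# p-value means the sample is consistent with normality.
#
#   import numpy as np
#   from scipy import stats
#   rng = np.random.RandomState(12345)
#   stat, p = stats.normaltest(rng.normal(size=1000))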
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0,N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
    This histogram is based on numpy's histogram but, if `defaultlimits` is
    not given, uses a range slightly larger than the range of the data.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
        # Check that the mean of the transformed data is (approximately) equal
        # to the original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
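    Examples
    --------
    A minimal sketch (the function is deprecated; it simply returns
    ``a.mean(axis) / a.std(axis, ddof=ddof)``, with 0 wherever the standard
    deviation is 0):
    >>> from scipy import stats
    >>> a = np.array([1., 2., 3., 4., 5.])
    >>> s2n = stats.signaltonoise(a)    # same as a.mean() / a.std() here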
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by
    other ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : two-element sequence of floats in range [0, 100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
        # At time of writing, this means np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
        # If the result is non-scalar and this numpy version's nanpercentile
        # does not roll the reduced axis properly.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
                # If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array contains only those elements of the input array `a`
    that satisfy the conditions ::
        mean(c) - std(c)*low < c < mean(c) + std(c)*high
    where ``c`` denotes the surviving (clipped) array at each iteration.
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
    Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
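    Examples
    --------
    A short illustration of the default (right-tail) trimming; only the
    shape is shown because the order of the trimmed output is undefined.
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trim1(a, 0.1)    # drops the highest 10% of the values
    >>> b.shape
    (18,)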
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation.  Variance is invariant
    # to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
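    Examples
    --------
    A minimal illustration; for perfectly linearly related data the
    coefficient is exactly 1 and the reported p-value is 0.
    >>> from scipy import stats
    >>> stats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
    (1.0, 0.0)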
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
                Atlantic  Indian
        whales     8        2
        sharks     1        5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
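        # mode of the hypergeometric distribution hypergeom(M=n1 + n2, n=n1, N=n)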
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square,
        with length equal to the total number of variables (columns or rows)
        in `a` and `b` combined.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated. It has the same dimension as
        `correlation`.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}
                 \sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
       no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
    initial_lexsort : bool, optional
        Unused (deprecated). The keyword is kept only for backward
        compatibility; passing any value raises a warning and has no effect
        on the computation.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
    if initial_lexsort is not None:  # deprecated keyword, to be removed
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
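        # Each group of c tied values contributes c * (c - 1) / 2 tied pairs.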
cnt = np.bincount(ranks).astype('int64', copy=False)
return (cnt * (cnt - 1) // 2).sum()
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
con, dis = _kendall_condis(x, y) # concordant & discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie = count_rank_tie(x) - ntie # ties only in x
ytie = count_rank_tie(y) - ntie # ties only in y
if con + dis + xtie == 0 or con + dis + ytie == 0:
return KendalltauResult(np.nan, np.nan)
tau = (con - dis) / np.sqrt(con + dis + xtie) / np.sqrt(con + dis + ytie)
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
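    # Under the null hypothesis of independence, tau is approximately normal
    # with variance (4n + 10) / (9n(n - 1)) (no tie correction is applied);
    # the constant 1.4142136 below approximates sqrt(2).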
svar = (4.0 * size + 10.0) / (9.0 * size * (size - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in the null hypothesis; if array_like, then it must
        have the same shape as `a`, excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
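    # Welch-Satterthwaite approximation to the degrees of freedom.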
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
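    # Classic pooled-variance (Student) denominator and degrees of freedom.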
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
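    Examples
    --------
    A minimal consistency sketch: feeding the summary statistics of two
    samples reproduces `ttest_ind` on the raw data (the unbiased ``ddof=1``
    standard deviations are assumed here so that the results match).
    >>> from scipy import stats
    >>> a = np.array([1., 2., 3., 4., 5.])
    >>> b = np.array([2., 4., 6., 8., 10.])
    >>> res1 = stats.ttest_ind_from_stats(a.mean(), a.std(ddof=1), a.size,
    ...                                   b.mean(), b.std(ddof=1), b.size)
    >>> res2 = stats.ttest_ind(a, b)
    >>> np.allclose(res1, res2)
    True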
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for the use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
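# Hypothetical usage sketch (illustrative helper, not part of the module's
# API; inputs are arbitrary). `_count` returns the plain size/axis length for
# ordinary ndarrays and defers to the `count` method of masked arrays, so
# masked entries are excluded from the count.
def _example_count():
    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    masked = ma.masked_invalid(np.array([1.0, np.nan, 3.0]))
    # x has 6 elements in total and 2 rows along axis 0; `masked` has
    # 2 unmasked values.
    return _count(x), _count(x, axis=0), _count(masked)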
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
        String              Value   Description
        "pearson"             1     Pearson's chi-squared statistic.
                                    In this case, the function is
                                    equivalent to `stats.chisquare`.
        "log-likelihood"      0     Log-likelihood ratio. Also known as
                                    the G-test [3]_.
        "freeman-tukey"      -1/2   Freeman-Tukey statistic.
        "mod-log-likelihood" -1     Modified log-likelihood ratio.
        "neyman"             -2     Neyman's statistic.
        "cressie-read"        2/3   The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
    Calculates a one-way chi-square test.
    The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
    This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1, = data1.shape
n2, = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (np.issubdtype(common_type, np.number) and
not np.issubdtype(common_type, np.complexfloating)):
raise ValueError('ks_2samp only accepts real inputs')
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError('ks_2samp only accepts non-nan inputs')
# Absolute KS distance can be computed (less efficiently) as follows:
# data_all = np.concatenate([data1, data2])
# d = np.max(np.abs(data1.searchsorted(data_all, side='right') / n1 -
# data2.searchsorted(data_all, side='right') / n2))
d = _stats.ks_2samp(np.asarray(data1, common_type),
np.asarray(data2, common_type))
en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # fall back to the most conservative value if the asymptotic
        # distribution cannot be evaluated
        prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
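# Hypothetical usage sketch (illustrative helper, not part of the module's
# API; the samples are arbitrary random draws). With
# `alternative='two-sided'` the returned statistic is U for y and the p-value
# is two-tailed, avoiding the deprecated default behaviour.
def _example_mannwhitneyu():
    rng = np.random.RandomState(0)  # arbitrary fixed seed
    x = rng.normal(loc=0.0, scale=1.0, size=25)
    y = rng.normal(loc=0.5, scale=1.0, size=30)
    res = mannwhitneyu(x, y, alternative='two-sided')
    return res.statistic, res.pvalue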
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
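# Hypothetical usage sketch (illustrative helper, not part of the module's
# API; the samples are arbitrary random draws). `ranksums` assumes continuous
# data without ties; for tied data `mannwhitneyu` above is usually the better
# choice.
def _example_ranksums():
    rng = np.random.RandomState(1)  # arbitrary fixed seed
    x = rng.normal(loc=0.0, scale=1.0, size=40)
    y = rng.normal(loc=0.3, scale=1.0, size=50)
    statistic, pvalue = ranksums(x, y)
    return statistic, pvalue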
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
    if contains_nan and nan_policy == 'omit':
        # mask nan values before handing off to the masked-array implementation
        args = [ma.masked_invalid(arg) for arg in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
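# Hypothetical usage sketch (illustrative helper, not part of the module's
# API; the measurements are arbitrary placeholders). Each argument holds one
# measurement per individual, so all arrays must have the same length and at
# least three measurement sets are required.
def _example_friedmanchisquare():
    before = [72, 75, 71, 80, 78, 69, 74, 77, 73, 76, 70, 79]
    after_one_week = [70, 73, 70, 78, 74, 68, 73, 75, 71, 74, 69, 76]
    after_one_month = [68, 71, 69, 75, 72, 66, 70, 74, 70, 73, 67, 74]
    res = friedmanchisquare(before, after_one_week, after_one_month)
    return res.statistic, res.pvalue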
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
    statistic : float
        The statistic calculated by the specified method:
        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval : float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
"Invalid method '%s'. Options are 'fisher' or 'stouffer'", method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)), with G(a) being the gamma
    function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
dfden : int
Degrees of freedom associated with the Restricted model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
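# Hypothetical usage sketch (illustrative helper, not part of the module's
# API). For a = [1, 2, 3], `_sum_of_squares` gives 1 + 4 + 9 = 14, while its
# counterpart `_square_of_sums` (defined below) gives (1 + 2 + 3)**2 = 36.
def _example_sum_of_squares():
    a = np.array([1.0, 2.0, 3.0])
    return _sum_of_squares(a), _square_of_sums(a)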
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted_array : ndarray
        Sorted copy of the input array.
    indices : ndarray of type int
        The indices that sort the original array (``np.argsort(a)``).
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| bsd-3-clause |
wandb/client | tests/functional_tests/t0_main/artifacts/use-and-link-model.py | 1 | 1248 | import torch
import torch.nn as nn
import torch.nn.functional as F # noqa
import wandb
from wandb.beta.workflows import link_model, use_model
from wandb.data_types import _SavedModel
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def main():
run = wandb.init()
my_model = Net()
sm = _SavedModel.init(my_model)
art = wandb.Artifact("my-model", "model")
art.add(sm, "index")
art = run.log_artifact(art)
art.wait()
sm = use_model("my-model:latest")
link_model(sm, "entity/project/test_portfolio")
run.finish()
if __name__ == "__main__":
main()
| mit |
espg/scikit-learn | sklearn/impute/_base.py | 7 | 38334 | # Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# Sergey Feldman <sergeyfeldman@gmail.com>
# License: BSD 3 clause
import numbers
import warnings
from collections import Counter
import numpy as np
import numpy.ma as ma
from scipy import sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils._param_validation import StrOptions, Hidden
from ..utils.fixes import _mode
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _check_feature_names_in
from ..utils._mask import _get_mask
from ..utils import _is_pandas_na
from ..utils import is_scalar_nan
def _check_inputs_dtype(X, missing_values):
if _is_pandas_na(missing_values):
# Allow using `pd.NA` as missing values to impute numerical arrays.
return
if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
raise ValueError(
"'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}.".format(X.dtype, type(missing_values))
)
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
most_frequent_count = counter.most_common(1)[0][1]
# tie breaking similarly to scipy.stats.mode
most_frequent_value = min(
value
for value, count in counter.items()
if count == most_frequent_count
)
else:
mode = _mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# tie breaking similarly to scipy.stats.mode
return min(most_frequent_value, extra_value)
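# Hypothetical usage sketch (illustrative helper, not part of the module's
# API; inputs are arbitrary). `_most_frequent` behaves as if the column were
# extended with `n_repeat` copies of `extra_value` (implicit zeros in the
# sparse case) and breaks ties by choosing the smallest value.
def _example_most_frequent():
    column = np.array([2, 2, 5, 5, 7])
    # Two 2s and two 5s against three implicit zeros: the extra value (0)
    # occurs most often and is therefore returned.
    return _most_frequent(column, extra_value=0, n_repeat=3)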
class _BaseImputer(TransformerMixin, BaseEstimator):
"""Base class for all imputers.
It adds automatically support for `add_indicator`.
"""
_parameter_constraints: dict = {
"missing_values": ["missing_values"],
"add_indicator": ["boolean"],
}
def __init__(self, *, missing_values=np.nan, add_indicator=False):
self.missing_values = missing_values
self.add_indicator = add_indicator
def _fit_indicator(self, X):
"""Fit a MissingIndicator."""
if self.add_indicator:
self.indicator_ = MissingIndicator(
missing_values=self.missing_values, error_on_new=False
)
self.indicator_._fit(X, precomputed=True)
else:
self.indicator_ = None
def _transform_indicator(self, X):
"""Compute the indicator mask.'
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, "indicator_"):
raise ValueError(
"Make sure to call _fit_indicator before _transform_indicator"
)
return self.indicator_.transform(X)
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data."""
if not self.add_indicator:
return X_imputed
hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return hstack((X_imputed, X_indicator))
def _concatenate_indicator_feature_names_out(self, names, input_features):
if not self.add_indicator:
return names
indicator_names = self.indicator_.get_feature_names_out(input_features)
return np.concatenate([names, indicator_names])
def _more_tags(self):
return {"allow_nan": is_scalar_nan(self.missing_values)}
class SimpleImputer(_BaseImputer):
"""Univariate imputer for completing missing values with simple strategies.
Replace missing values using a descriptive statistic (e.g. mean, median, or
most frequent) along each column, or using a constant value.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
estimator which is now removed.
Parameters
----------
missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
can be set to either `np.nan` or `pd.NA`.
strategy : str, default='mean'
The imputation strategy.
- If "mean", then replace missing values using the mean along
each column. Can only be used with numeric data.
- If "median", then replace missing values using the median along
each column. Can only be used with numeric data.
- If "most_frequent", then replace missing using the most frequent
value along each column. Can be used with strings or numeric data.
If there is more than one such value, only the smallest is returned.
- If "constant", then replace missing values with fill_value. Can be
used with strings or numeric data.
.. versionadded:: 0.20
strategy="constant" for fixed value imputation.
fill_value : str or numerical value, default=None
When strategy == "constant", fill_value is used to replace all
occurrences of missing_values.
If left to the default, fill_value will be 0 when imputing numerical
data and "missing_value" for strings or object data types.
verbose : int, default=0
Controls the verbosity of the imputer.
.. deprecated:: 1.1
The 'verbose' parameter was deprecated in version 1.1 and will be
removed in 1.3. A warning will always be raised upon the removal of
empty columns in the future version.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If `X` is not an array of floating values;
- If `X` is encoded as a CSR matrix;
- If `add_indicator=True`.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto output
of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on
the missing indicator even if there are missing values at
transform/test time.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature.
Computing statistics can result in `np.nan` values.
During :meth:`transform`, features corresponding to `np.nan`
statistics will be discarded.
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
`None` if `add_indicator=False`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
IterativeImputer : Multivariate imputer that estimates values to impute for
each feature with missing values from all the others.
KNNImputer : Multivariate imputer that estimates missing features using
nearest samples.
Notes
-----
Columns which only contained missing values at :meth:`fit` are discarded
upon :meth:`transform` if strategy is not `"constant"`.
In a prediction context, simple imputation usually performs poorly when
associated with a weak learner. However, with a powerful learner, it can
lead to as good or better performance than complex imputation such as
:class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import SimpleImputer
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
SimpleImputer()
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> print(imp_mean.transform(X))
[[ 7. 2. 3. ]
[ 4. 3.5 6. ]
[10. 3.5 9. ]]
"""
_parameter_constraints: dict = {
**_BaseImputer._parameter_constraints,
"strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})],
"fill_value": "no_validation", # any object is valid
"verbose": ["verbose", Hidden(StrOptions({"deprecated"}))],
"copy": ["boolean"],
}
def __init__(
self,
*,
missing_values=np.nan,
strategy="mean",
fill_value=None,
verbose="deprecated",
copy=True,
add_indicator=False,
):
super().__init__(missing_values=missing_values, add_indicator=add_indicator)
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy
def _validate_input(self, X, in_fit):
if self.strategy in ("most_frequent", "constant"):
# If input is a list of strings, dtype = object.
# Otherwise ValueError is raised in SimpleImputer
# with strategy='most_frequent' or 'constant'
# because the list is converted to Unicode numpy array
if isinstance(X, list) and any(
isinstance(elem, str) for row in X for elem in row
):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not in_fit and self._fit_dtype.kind == "O":
# Use object dtype if fitted on object dtypes
dtype = self._fit_dtype
if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values):
force_all_finite = "allow-nan"
else:
force_all_finite = True
try:
X = self._validate_data(
X,
reset=in_fit,
accept_sparse="csc",
dtype=dtype,
force_all_finite=force_all_finite,
copy=self.copy,
)
except ValueError as ve:
if "could not convert" in str(ve):
new_ve = ValueError(
"Cannot use {} strategy with non-numeric data:\n{}".format(
self.strategy, ve
)
)
raise new_ve from None
else:
raise ve
if in_fit:
# Use the dtype seen in `fit` for non-`fit` conversion
self._fit_dtype = X.dtype
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError(
"SimpleImputer does not support data with dtype "
"{0}. Please provide either a numeric array (with"
" a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype)
)
return X
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
if self.verbose != "deprecated":
warnings.warn(
"The 'verbose' parameter was deprecated in version "
"1.1 and will be removed in 1.3. A warning will "
"always be raised upon the removal of empty columns "
"in the future version.",
FutureWarning,
)
X = self._validate_input(X, in_fit=True)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
# fill_value should be numerical in case of numerical input
if (
self.strategy == "constant"
and X.dtype.kind in ("i", "u", "f")
and not isinstance(fill_value, numbers.Real)
):
raise ValueError(
"'fill_value'={0} is invalid. Expected a "
"numerical value when imputing numerical "
"data".format(fill_value)
)
if sp.issparse(X):
# missing_values = 0 not allowed with sparse data as it would
# force densification
if self.missing_values == 0:
raise ValueError(
"Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
self.statistics_ = self._sparse_fit(
X, self.strategy, self.missing_values, fill_value
)
else:
self.statistics_ = self._dense_fit(
X, self.strategy, self.missing_values, fill_value
)
return self
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
missing_mask = _get_mask(X, missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i] : X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
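                # (CSC stores only explicitly set entries, so zeros that were
                # never stored are "implicit"; both kinds must be counted when
                # computing the column statistic below.)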
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column, n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
return statistics
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
missing_mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
# Avoid use of scipy.stats.mstats.mode due to the required
# additional overhead and slow benchmarking performance.
# See Issue 14325 and PR 14399 for full discussion.
            # To be able to access the elements by column
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
def transform(self, X):
"""Impute all missing values in `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape \
(n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
X = self._validate_input(X, in_fit=False)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError(
"X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0])
)
# compute mask before eliminating invalid features
missing_mask = _get_mask(X, self.missing_values)
# Delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = statistics
valid_statistics_indexes = None
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
invalid_features = np.arange(X.shape[1])[invalid_mask]
if self.verbose != "deprecated" and self.verbose:
# use feature names warning if features are provided
if hasattr(self, "feature_names_in_"):
invalid_features = self.feature_names_in_[invalid_features]
warnings.warn(
"Skipping features without any observed values:"
f" {invalid_features}. At least one non-missing value is needed"
f" for imputation with strategy='{self.strategy}'."
)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError(
"Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
# if no invalid statistics are found, use the mask computed
# before, else recompute mask
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
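                # Map each stored CSC entry back to its column index using the
                # per-column counts from `indptr`, keep only the missing ones,
                # and fill each with its column's statistic.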
indexes = np.repeat(
np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
)[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
# use mask computed before eliminating invalid mask
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
def inverse_transform(self, X):
"""Convert the data back to the original representation.
Inverts the `transform` operation performed on an array.
This operation can only be performed after :class:`SimpleImputer` is
instantiated with `add_indicator=True`.
Note that `inverse_transform` can only invert the transform in
features that have binary indicators for missing values. If a feature
has no missing values at `fit` time, the feature won't have a binary
indicator, and the imputation done at `transform` time won't be
inverted.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape \
(n_samples, n_features + n_features_missing_indicator)
The imputed data to be reverted to original data. It has to be
an augmented array of imputed data and the missing indicator mask.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
The original `X` with missing values as it was prior
to imputation.
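        Examples
        --------
        A minimal sketch with the default `strategy="mean"`:
        >>> import numpy as np
        >>> from sklearn.impute import SimpleImputer
        >>> imp = SimpleImputer(add_indicator=True)
        >>> X = np.array([[1.0, np.nan], [3.0, 4.0]])
        >>> Xt = imp.fit_transform(X)
        >>> imp.inverse_transform(Xt)
        array([[ 1., nan],
               [ 3.,  4.]])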
"""
check_is_fitted(self)
if not self.add_indicator:
raise ValueError(
"'inverse_transform' works only when "
"'SimpleImputer' is instantiated with "
"'add_indicator=True'. "
f"Got 'add_indicator={self.add_indicator}' "
"instead."
)
n_features_missing = len(self.indicator_.features_)
non_empty_feature_count = X.shape[1] - n_features_missing
array_imputed = X[:, :non_empty_feature_count].copy()
missing_mask = X[:, non_empty_feature_count:].astype(bool)
n_features_original = len(self.statistics_)
shape_original = (X.shape[0], n_features_original)
X_original = np.zeros(shape_original)
X_original[:, self.indicator_.features_] = missing_mask
full_mask = X_original.astype(bool)
imputed_idx, original_idx = 0, 0
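        # Realign imputed columns with their original positions: an original
        # column whose indicator mask is all True has no imputed counterpart
        # and is skipped, otherwise it receives the next imputed column.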
while imputed_idx < len(array_imputed.T):
if not np.all(X_original[:, original_idx]):
X_original[:, original_idx] = array_imputed.T[imputed_idx]
imputed_idx += 1
original_idx += 1
else:
original_idx += 1
X_original[full_mask] = self.missing_values
return X_original
def _more_tags(self):
return {
"allow_nan": (
_is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values)
)
}
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
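        Examples
        --------
        A minimal sketch with `add_indicator=True` (indicator names are
        appended after the imputed feature names):
        >>> import numpy as np
        >>> from sklearn.impute import SimpleImputer
        >>> imp = SimpleImputer(add_indicator=True)
        >>> imp.fit([[1.0, np.nan], [3.0, 4.0]])
        SimpleImputer(add_indicator=True)
        >>> imp.get_feature_names_out()
        array(['x0', 'x1', 'missingindicator_x1'], dtype=object)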
"""
input_features = _check_feature_names_in(self, input_features)
non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
names = input_features[non_missing_mask]
return self._concatenate_indicator_feature_names_out(names, input_features)
class MissingIndicator(TransformerMixin, BaseEstimator):
"""Binary indicators for missing values.
Note that this component typically should not be used in a vanilla
:class:`Pipeline` consisting of transformers and a classifier, but rather
could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
features : {'missing-only', 'all'}, default='missing-only'
Whether the imputer mask should represent all or a subset of
features.
- If `'missing-only'` (default), the imputer mask will only represent
features containing missing values during fit time.
- If `'all'`, the imputer mask will represent all features.
sparse : bool or 'auto', default='auto'
Whether the imputer mask format should be sparse or dense.
- If `'auto'` (default), the imputer mask will be of same type as
input.
- If `True`, the imputer mask will be a sparse matrix.
- If `False`, the imputer mask will be a numpy array.
error_on_new : bool, default=True
If `True`, :meth:`transform` will raise an error when there are
features with missing values that have no missing values in
:meth:`fit`. This is applicable only when `features='missing-only'`.
Attributes
----------
features_ : ndarray of shape (n_missing_features,) or (n_features,)
The features indices which will be returned when calling
:meth:`transform`. They are computed during :meth:`fit`. If
`features='all'`, `features_` is equal to `range(n_features)`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SimpleImputer : Univariate imputation of missing values.
IterativeImputer : Multivariate imputation of missing values.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator()
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
_parameter_constraints: dict = {
"missing_values": [numbers.Real, numbers.Integral, str, None],
"features": [StrOptions({"missing-only", "all"})],
"sparse": ["boolean", StrOptions({"auto"})],
"error_on_new": ["boolean"],
}
def __init__(
self,
*,
missing_values=np.nan,
features="missing-only",
sparse="auto",
error_on_new=True,
):
self.missing_values = missing_values
self.features = features
self.sparse = sparse
self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data with missing values. Note that `X` has been
            checked in :meth:`fit` and :meth:`transform` before calling this
            function.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape \
(n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray of shape (n_features_with_missing)
The features containing missing values.
"""
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == "missing-only":
n_missing = imputer_mask.getnnz(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == "csr":
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == "missing-only":
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == "all":
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
return imputer_mask, features_indices
def _validate_input(self, X, in_fit):
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(
X,
reset=in_fit,
accept_sparse=("csc", "csr"),
dtype=None,
force_all_finite=force_all_finite,
)
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError(
"MissingIndicator does not support data with "
"dtype {0}. Please provide either a numeric array"
" (with a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype)
)
if sp.issparse(X) and self.missing_values == 0:
# missing_values = 0 not allowed with sparse data as it would
# force densification
raise ValueError(
"Sparse input with missing_values=0 is "
"not supported. Provide a dense "
"array instead."
)
return X
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
self._precomputed = True
else:
self._precomputed = False
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
self._n_features = X.shape[1]
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
return missing_features_info[0]
def fit(self, X, y=None):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
self._fit(X, y)
return self
def transform(self, X):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
check_is_fitted(self)
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=False)
else:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if self.error_on_new and features_diff_fit_trans.size > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def fit_transform(self, X, y=None):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
self._validate_params()
imputer_mask = self._fit(X, y)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
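        Examples
        --------
        A minimal sketch (names default to `x0, x1, ...` when the input has
        no feature names; output names are prefixed with the class name):
        >>> import numpy as np
        >>> from sklearn.impute import MissingIndicator
        >>> indicator = MissingIndicator()
        >>> indicator.fit(np.array([[np.nan, 1.0], [2.0, 3.0]]))
        MissingIndicator()
        >>> indicator.get_feature_names_out()
        array(['missingindicator_x0'], dtype=object)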
"""
input_features = _check_feature_names_in(self, input_features)
prefix = self.__class__.__name__.lower()
return np.asarray(
[
f"{prefix}_{feature_name}"
for feature_name in input_features[self.features_]
],
dtype=object,
)
def _more_tags(self):
return {
"allow_nan": True,
"X_types": ["2darray", "string"],
"preserves_dtype": [],
}
| bsd-3-clause |
theflofly/tensorflow | tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py | 10 | 20083 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.CsvDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class CsvDatasetTest(test_base.DatasetTestBase):
def _setup_files(self, inputs, linebreak='\n', compression_type=None):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i)
contents = linebreak.join(ip).encode('utf-8')
if compression_type is None:
with open(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'GZIP':
with gzip.GzipFile(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'ZLIB':
contents = zlib.compress(contents)
with open(fn, 'wb') as f:
f.write(contents)
else:
raise ValueError('Unsupported compression_type', compression_type)
filenames.append(fn)
return filenames
def _make_test_datasets(self, inputs, **kwargs):
# Test by comparing its output to what we could get with map->decode_csv
filenames = self._setup_files(inputs)
dataset_expected = core_readers.TextLineDataset(filenames)
dataset_expected = dataset_expected.map(
lambda l: parsing_ops.decode_csv(l, **kwargs))
dataset_actual = readers.CsvDataset(filenames, **kwargs)
return (dataset_actual, dataset_expected)
def _test_by_comparison(self, inputs, **kwargs):
"""Checks that CsvDataset is equiv to TextLineDataset->map(decode_csv)."""
dataset_actual, dataset_expected = self._make_test_datasets(
inputs, **kwargs)
self.assertDatasetsEqual(dataset_actual, dataset_expected)
def _verify_output_or_err(self,
dataset,
expected_output=None,
expected_err_re=None):
if expected_err_re is None:
# Verify that output is expected, without errors
nxt = self.getNext(dataset)
expected_output = [[
v.encode('utf-8') if isinstance(v, str) else v for v in op
] for op in expected_output]
for value in expected_output:
op = self.evaluate(nxt())
self.assertAllEqual(op, value)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(nxt())
else:
nxt = self.getNext(dataset)
while True:
try:
self.evaluate(nxt())
except errors.OutOfRangeError:
break
def _test_dataset(
self,
inputs,
expected_output=None,
expected_err_re=None,
linebreak='\n',
compression_type=None, # Used for both setup and parsing
**kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
filenames = self._setup_files(inputs, linebreak, compression_type)
kwargs['compression_type'] = compression_type
if expected_err_re is not None:
# Verify that OpError is produced as expected
with self.assertRaisesOpError(expected_err_re):
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
else:
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
def testCsvDataset_requiredFields(self):
record_defaults = [[]] * 4
inputs = [['1,2,3,4']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_int(self):
record_defaults = [[0]] * 4
inputs = [['1,2,3,4', '5,6,7,8']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_float(self):
record_defaults = [[0.0]] * 4
inputs = [['1.0,2.1,3.2,4.3', '5.4,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_string(self):
record_defaults = [['']] * 4
inputs = [['1.0,2.1,hello,4.3', '5.4,6.5,goodbye,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withEmptyFields(self):
record_defaults = [[0]] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
def testCsvDataset_errWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_dataset(
inputs,
expected_err_re='Unquoted fields cannot have quotes inside',
record_defaults=record_defaults)
def testCsvDataset_errWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['"a"b","c","d"']]
self._test_dataset(
inputs,
expected_err_re=
'Quote inside a string has to be escaped by another quote',
record_defaults=record_defaults)
def testCsvDataset_ignoreErrWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_ignoreErrWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4', 'a,b,c"d', '9,8"7,6,5', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_withNoQuoteDelimAndUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_mixedTypes(self):
record_defaults = [
constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.float32),
constant_op.constant([], dtype=dtypes.string),
constant_op.constant([], dtype=dtypes.float64)
]
inputs = [['1,2.1,3.2,4.3', '5,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withUseQuoteDelimFalse(self):
record_defaults = [['']] * 4
inputs = [['1,2,"3,4"', '"5,6",7,8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_withFieldDelim(self):
record_defaults = [[0]] * 4
inputs = [['1:2:3:4', '5:6:7:8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, field_delim=':')
def testCsvDataset_withNaValue(self):
record_defaults = [[0]] * 4
inputs = [['1,NA,3,4', 'NA,6,7,8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, na_value='NA')
def testCsvDataset_withSelectCols(self):
record_defaults = [['']] * 2
inputs = [['1,2,3,4', '"5","6","7","8"']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, select_cols=[1, 2])
def testCsvDataset_withSelectColsTooHigh(self):
record_defaults = [[0]] * 2
inputs = [['1,2,3,4', '5,6,7,8']]
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have 1 in record',
record_defaults=record_defaults,
select_cols=[3, 4])
def testCsvDataset_withOneCol(self):
record_defaults = [['NA']]
inputs = [['0', '', '2']]
self._test_dataset(
inputs, [['0'], ['NA'], ['2']], record_defaults=record_defaults)
def testCsvDataset_withMultipleFiles(self):
record_defaults = [[0]] * 4
inputs = [['1,2,3,4', '5,6,7,8'], ['5,6,7,8']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withLeadingAndTrailingSpaces(self):
record_defaults = [[0.0]] * 4
inputs = [['0, 1, 2, 3']]
expected = [[0.0, 1.0, 2.0, 3.0]]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_errorWithMissingDefault(self):
record_defaults = [[]] * 2
inputs = [['0,']]
self._test_dataset(
inputs,
expected_err_re='Field 1 is required but missing in record!',
record_defaults=record_defaults)
def testCsvDataset_errorWithFewerDefaultsThanFields(self):
record_defaults = [[0.0]] * 2
inputs = [['0,1,2,3']]
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have more in record',
record_defaults=record_defaults)
def testCsvDataset_errorWithMoreDefaultsThanFields(self):
record_defaults = [[0.0]] * 5
inputs = [['0,1,2,3']]
self._test_dataset(
inputs,
expected_err_re='Expect 5 fields but have 4 in record',
record_defaults=record_defaults)
def testCsvDataset_withHeader(self):
record_defaults = [[0]] * 2
inputs = [['col1,col2', '1,2']]
expected = [[1, 2]]
self._test_dataset(
inputs,
expected,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_withHeaderAndNoRecords(self):
record_defaults = [[0]] * 2
inputs = [['col1,col2']]
expected = []
self._test_dataset(
inputs,
expected,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_errorWithHeaderEmptyFile(self):
record_defaults = [[0]] * 2
inputs = [[]]
expected_err_re = "Can't read header of file"
self._test_dataset(
inputs,
expected_err_re=expected_err_re,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_withEmptyFile(self):
record_defaults = [['']] * 2
inputs = [['']] # Empty file
self._test_dataset(
inputs, expected_output=[], record_defaults=record_defaults)
def testCsvDataset_errorWithEmptyRecord(self):
record_defaults = [['']] * 2
inputs = [['', '1,2']] # First record is empty
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have 1 in record',
record_defaults=record_defaults)
def testCsvDataset_withChainedOps(self):
# Testing that one dataset can create multiple iterators fine.
# `repeat` creates multiple iterators from the same C++ Dataset.
record_defaults = [[0]] * 4
inputs = [['1,,3,4', '5,6,,8']]
ds_actual, ds_expected = self._make_test_datasets(
inputs, record_defaults=record_defaults)
self.assertDatasetsEqual(
ds_actual.repeat(5).prefetch(1),
ds_expected.repeat(5).prefetch(1))
def testCsvDataset_withTypeDefaults(self):
# Testing using dtypes as record_defaults for required fields
record_defaults = [dtypes.float32, [0.0]]
inputs = [['1.0,2.0', '3.0,4.0']]
self._test_dataset(
inputs,
[[1.0, 2.0], [3.0, 4.0]],
record_defaults=record_defaults,
)
def testMakeCsvDataset_fieldOrder(self):
data = [[
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19',
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19'
]]
file_path = self._setup_files(data)
ds = readers.make_csv_dataset(
file_path, batch_size=1, shuffle=False, num_epochs=1)
nxt = self.getNext(ds)
result = list(self.evaluate(nxt()).values())
self.assertEqual(result, sorted(result))
## The following tests exercise parsing logic for quoted fields
def testCsvDataset_withQuoted(self):
record_defaults = [['']] * 4
inputs = [['"a","b","c :)","d"', '"e","f","g :(","h"']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withOneColAndQuotes(self):
record_defaults = [['']]
inputs = [['"0"', '"1"', '"2"']]
self._test_dataset(
inputs, [['0'], ['1'], ['2']], record_defaults=record_defaults)
def testCsvDataset_withNewLine(self):
# In this case, we expect it to behave differently from
# TextLineDataset->map(decode_csv) since that flow has bugs
record_defaults = [['']] * 4
inputs = [['a,b,"""c""\n0","d\ne"', 'f,g,h,i']]
expected = [['a', 'b', '"c"\n0', 'd\ne'], ['f', 'g', 'h', 'i']]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_withNewLineInUnselectedCol(self):
record_defaults = [['']]
inputs = [['1,"2\n3",4', '5,6,7']]
self._test_dataset(
inputs,
expected_output=[['1'], ['5']],
record_defaults=record_defaults,
select_cols=[0])
def testCsvDataset_withMultipleNewLines(self):
# In this case, we expect it to behave differently from
# TextLineDataset->map(decode_csv) since that flow has bugs
record_defaults = [['']] * 4
inputs = [['a,"b\n\nx","""c""\n \n0","d\ne"', 'f,g,h,i']]
expected = [['a', 'b\n\nx', '"c"\n \n0', 'd\ne'], ['f', 'g', 'h', 'i']]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_errorWithTerminateMidRecord(self):
record_defaults = [['']] * 4
inputs = [['a,b,c,"a']]
self._test_dataset(
inputs,
expected_err_re=
'Reached end of file without closing quoted field in record',
record_defaults=record_defaults)
def testCsvDataset_withEscapedQuotes(self):
record_defaults = [['']] * 4
inputs = [['1.0,2.1,"she said: ""hello""",4.3', '5.4,6.5,goodbye,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
## Testing that parsing works with all buffer sizes, quoted/unquoted fields,
## and different types of line breaks
def testCsvDataset_withInvalidBufferSize(self):
record_defaults = [['']] * 4
inputs = [['a,b,c,d']]
self._test_dataset(
inputs,
expected_err_re='buffer_size should be positive',
record_defaults=record_defaults,
buffer_size=0)
def _test_dataset_on_buffer_sizes(self,
inputs,
expected,
linebreak,
record_defaults,
compression_type=None,
num_sizes_to_test=20):
# Testing reading with a range of buffer sizes that should all work.
for i in list(range(1, 1 + num_sizes_to_test)) + [None]:
self._test_dataset(
inputs,
expected,
linebreak=linebreak,
compression_type=compression_type,
record_defaults=record_defaults,
buffer_size=i)
def testCsvDataset_withLF(self):
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCR(self):
# Test that when the line separator is '\r', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLF(self):
# Test that when the line separator is '\r\n', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
def testCsvDataset_withBufferSizeAndQuoted(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCRAndQuoted(self):
# Test that when the line separator is '\r', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLFAndQuoted(self):
# Test that when the line separator is '\r\n', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
def testCsvDataset_withGzipCompressionType(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs,
expected,
linebreak='\r\n',
compression_type='GZIP',
record_defaults=record_defaults)
def testCsvDataset_withZlibCompressionType(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs,
expected,
linebreak='\r\n',
compression_type='ZLIB',
record_defaults=record_defaults)
def testCsvDataset_withScalarDefaults(self):
record_defaults = [constant_op.constant(0, dtype=dtypes.int64)] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
def testCsvDataset_with2DDefaults(self):
record_defaults = [constant_op.constant([[0]], dtype=dtypes.int64)] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
if context.executing_eagerly():
err_spec = errors.InvalidArgumentError, (
'Each record default should be at '
'most rank 1.')
else:
err_spec = ValueError, 'Shape must be at most rank 1 but is rank 2'
with self.assertRaisesWithPredicateMatch(*err_spec):
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
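# A minimal standalone usage sketch (not one of the test cases above): building
# a `CsvDataset` directly with explicit `record_defaults`, one per column. The
# file name is a placeholder; any CSV whose rows look like `1,2.5,hello` would
# match these three required columns.
def _example_csv_dataset(filename='/tmp/example.csv'):
  record_defaults = [
      constant_op.constant([], dtype=dtypes.int32),  # required int column
      constant_op.constant([], dtype=dtypes.float32),  # required float column
      constant_op.constant([], dtype=dtypes.string),  # required string column
  ]
  # Each dataset element is a tuple of three scalar tensors, one per column.
  return readers.CsvDataset(filename, record_defaults=record_defaults)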
if __name__ == '__main__':
test.main()
| apache-2.0 |
numenta/nupic.research | packages/archive/src/nupic/research/archive/continual_learning/split_mnist_byclass.py | 3 | 3499 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import numpy as np
import torch
from torchvision import datasets
class MNISTSplitter(object):
""" Just a simple class for downloading and splitting your MNIST dataset
    :param data_dir: Where you want the data to live. Defaults to None, in
    which case `/home/ec2-user/nta/data/mnist/` is used.
"""
def __init__(self, data_dir=None):
if data_dir is None:
data_dir = "/home/ec2-user/nta/data/mnist/"
if not os.path.isdir(data_dir):
print("Making directory {}".format(data_dir))
os.mkdir(data_dir)
        if len(os.listdir(data_dir)) > 0:
print("Warning: will delete and replace local files")
for file_path in os.listdir(data_dir):
try:
os.remove(os.path.join(data_dir, file_path))
except OSError as err:
print("Error {} : {}".format(os.path.join(data_dir,
file_path), err))
self.data_dir = data_dir
self.num_classes = 10
self.train_data, self.test_data = self.get_datasets(self.data_dir)
def get_datasets(self, data_dir):
""" Get the datasets
"""
train_dataset = datasets.MNIST(data_dir, download=True, train=True)
test_dataset = datasets.MNIST(data_dir, download=True, train=False)
print("Saved data to {}".format(data_dir))
return train_dataset, test_dataset
def split_mnist(self, data_dir):
""" Get tensors for each class and save them individually
"""
xs_train, ys_train = self.train_data.data, self.train_data.targets
xs_test, ys_test = self.test_data.data, self.test_data.targets
for class_ in range(self.num_classes):
# Training data
y_inds = np.where(ys_train == class_)[0]
x_class = xs_train[y_inds, :, :]
torch.save(
(x_class, class_ * torch.ones(len(y_inds))),
data_dir + "/mnist_train_{}.npz".format(class_),
)
# Test data
y_inds = np.where(ys_test == class_)[0]
x_class = xs_test[y_inds, :, :]
torch.save(
(x_class, class_ * torch.ones(len(y_inds))),
data_dir + "/mnist_test_{}.npz".format(class_),
)
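# A minimal sketch (not part of the original class) of how to load back one of
# the per-class files written by `split_mnist`. The class index 3 is only an
# example; any digit 0-9 that was saved above works the same way.
def load_class_split(data_dir, class_=3, train=True):
    """Return the (images, labels) tensors saved for a single MNIST class."""
    split = "train" if train else "test"
    path = os.path.join(data_dir, "mnist_{}_{}.npz".format(split, class_))
    # torch.save stored a (x_class, labels) tuple, so torch.load returns it
    return torch.load(path)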
if __name__ == "__main__":
data_dir = "/home/ec2-user/nta/data/mnist/"
splitter = MNISTSplitter(data_dir=data_dir)
print("Splitting... ")
splitter.split_mnist(data_dir)
print("Done!")
| agpl-3.0 |