repo_name | path | copies | size | content | license
---|---|---|---|---|---|
keflavich/TurbuStat | turbustat/statistics/wavelets/wavelet_transform.py | 1 | 13165 | # Licensed under an MIT open source license - see LICENSE
import numpy as np
import warnings
from astropy.convolution import convolve_fft, MexicanHat2DKernel
import statsmodels.formula.api as sm
from pandas import Series, DataFrame
try:
from scipy.fftpack import fftn, ifftn, fftfreq
except ImportError:
from numpy.fft import fftn, ifftn, fftfreq
class Mexican_hat():
'''
Implements the Mexican hat wavelet class.
Code is from kPyWavelet.
'''
name = 'Mexican hat'
def __init__(self):
# Reconstruction factor $C_{\psi, \delta}$
self.cpsi = 1. # pi
def psi_ft(self, k, l):
'''
Fourier transform of the Mexican hat wavelet as in Wang and
Lu (2010), equation [15].
'''
K, L = np.meshgrid(k, l)
return (K ** 2. + L ** 2.) * np.exp(-0.5 * (K ** 2. + L ** 2.))
def psi(self, x, y):
'''
Mexican hat wavelet as in Wang and Lu (2010), equation [14].
'''
X, Y = np.meshgrid(x, y)
return (2. - (X ** 2. + Y ** 2.)) * np.exp(-0.5 * (X ** 2. + Y ** 2.))
class wt2D(object):
'''
Compute the wavelet transform of a 2D array.
Parameters
----------
array : numpy.ndarray
2D array.
header : FITS header
Header for the array.
scales : numpy.ndarray or list
The scales where the transform is calculated.
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
wavelet : wavelet class
The wavelet class to use.
'''
def __init__(self, array, header, scales=None, dx=0.25, dy=0.25,
wavelet=Mexican_hat(), num=50, ang_units=True):
super(wt2D, self).__init__()
self.array = array.astype("f8")
self.header = header
self.wavelet = wavelet
if scales is None:
a_min = round((5. / 3.), 3) # Smallest scale given by paper
self.scales = np.logspace(
np.log10(a_min), np.log10(min(self.array.shape)), num)
else:
self.scales = scales
### NOTE: can't use nan_interpolating from astropy
### until the normalization for sum to zeros kernels is fixed!!!
self.array[np.isnan(self.array)] = np.nanmin(self.array)
self.nan_flag = False
if np.isnan(self.array).any():
self.nan_flag = True
if ang_units:
try:
self.imgscale = np.abs(self.header["CDELT2"])
            except KeyError:
                warnings.warn("Header does not contain the angular size. "
                              "Reverting to pixel scales.")
                ang_units = False
if not ang_units:
self.imgscale = 1.0
a_min = 5 / 3. # Minimum scale size given by Gill and Henriksen (90)
self.dx = dx * a_min
self.dy = dy * a_min
self.Wf = None
self.iWf = None
def cwt2d(self, dx=None, dy=None):
'''
Bi-dimensional continuous wavelet transform of the signal at
specified scale a.
Parameters
----------
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
'''
if dx is not None:
assert isinstance(dx, list)
self.dx = dx
if dy is not None:
assert isinstance(dy, list)
            self.dy = dy
# Determines the shape of the arrays and the discrete scales.
n0, m0 = self.array.shape
N, M = 2 ** int(np.ceil(np.log2(n0))), 2 ** int(np.ceil(np.log2(m0)))
if self.scales is None:
self.scales = 2 ** np.arange(int(np.floor(np.log2(min(n0, m0)))))
A = len(self.scales)
# Calculates the zonal and meridional wave numbers.
l, k = fftfreq(N, self.dy), fftfreq(M, self.dx)
# Calculates the Fourier transform of the input signal.
f_ft = fftn(self.array, shape=(N, M))
# Creates empty wavelet transform array and fills it for every discrete
# scale using the convolution theorem.
self.Wf = np.zeros((A, N, M), 'complex')
for i, an in enumerate(self.scales):
psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
self.Wf[i, :, :] = ifftn(f_ft * psi_ft_bar, shape=(N, M))
self.Wf = self.Wf[:, :n0, :m0]
return self
def astropy_cwt2d(self, dx=None, dy=None):
'''
Same as cwt2D except it uses astropy.convolve_fft's ability
to interpolate over NaNs.
Parameters
----------
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
'''
if dx is not None:
assert isinstance(dx, list)
self.dx = dx
if dy is not None:
assert isinstance(dy, list)
            self.dy = dy
n0, m0 = self.array.shape
N, M = 2 ** int(np.ceil(np.log2(n0))), 2 ** int(np.ceil(np.log2(m0)))
if self.scales is None:
self.scales = 2 ** np.arange(int(np.floor(np.log2(min(n0, m0)))))
A = len(self.scales)
self.Wf = np.zeros((A, N, M), 'complex')
for i, an in enumerate(self.scales):
psi = MexicanHat2DKernel(an, x_size=n0, y_size=m0)
self.Wf[i, :, :] = convolve_fft(self.array, psi,
interpolate_nan=True,
normalize_kernel=True,
fftn=fftn, ifftn=ifftn)
self.Wf = self.Wf[:, :n0, :m0]
return self
def icwt2d(self, da=0.25):
'''
Inverse bi-dimensional continuous wavelet transform as in Wang and
Lu (2010), equation [5].
Parameters
----------
da : float, optional
Spacing in the frequency axis.
'''
if self.Wf is None:
raise TypeError("Run cwt2D before icwt2D")
m0, l0, k0 = self.Wf.shape
if m0 != self.scales.size:
raise Warning('Scale parameter array shape does not match\
wavelet transform array shape.')
        # Determines the padded (power-of-two) array dimensions.
L, K = 2 ** int(np.ceil(np.log2(l0))), 2 ** int(np.ceil(np.log2(k0)))
# Calculates the zonal and meridional wave numbers.
l, k = fftfreq(L, self.dy), fftfreq(K, self.dx)
# Creates empty inverse wavelet transform array and fills it for every
# discrete scale using the convolution theorem.
self.iWf = np.zeros((m0, L, K), 'complex')
for i, an in enumerate(self.scales):
psi_ft_bar = an * self.wavelet.psi_ft(an * k, an * l)
            W_ft = fftn(self.Wf[i, :, :], shape=(L, K))
            self.iWf[i, :, :] = ifftn(W_ft * psi_ft_bar, shape=(L, K)) *\
da / an ** 2.
self.iWf = self.iWf[:, :l0, :k0].real.sum(axis=0) / self.wavelet.cpsi
return self
def make_1D_transform(self):
self.curve = transform((self.Wf, self.scales), self.imgscale)
def run(self):
'''
Compute the Wavelet transform.
'''
if self.nan_flag:
self.astropy_cwt2d()
else:
self.cwt2d()
self.make_1D_transform()
class Wavelet_Distance(object):
'''
Compute the distance between the two cubes using the Wavelet transform.
We fit a linear model to the two wavelet transforms. The distance is the
t-statistic of the interaction term describing the difference in the
slopes.
Parameters
----------
dataset1 : FITS hdu
2D image.
dataset2 : FITS hdu
2D image.
wavelet : class
Wavelet class. Only Mexican_hat() is implemented.
ang_units : bool, optional
Sets whether to use angular units.
scales : numpy.ndarray or list
The scales where the transform is calculated.
num : int
Number of scales to calculate the transform at.
dx : float, optional
Spacing in the x-direction.
dy : float, optional
Spacing in the y-direction.
fiducial_model : wt2D
        Computed wt2D object. Use to avoid recomputing.
'''
def __init__(self, dataset1, dataset2, wavelet=Mexican_hat(),
ang_units=True, scales=None, num=50, dx=0.25, dy=0.25,
fiducial_model=None):
super(Wavelet_Distance, self).__init__()
array1 = dataset1[0]
header1 = dataset1[1]
array2 = dataset2[0]
header2 = dataset2[1]
if fiducial_model is None:
self.wt1 = wt2D(array1, header1, scales=scales, wavelet=wavelet,
ang_units=ang_units)
self.wt1.run()
else:
self.wt1 = fiducial_model
self.wt2 = wt2D(array2, header2, scales=scales, wavelet=wavelet,
ang_units=ang_units)
self.wt2.run()
self.curve1 = self.wt1.curve
self.curve2 = self.wt2.curve
self.results = None
self.distance = None
def distance_metric(self, non_linear=True, verbose=False):
'''
Implements the distance metric for 2 wavelet transforms.
        We fit the linear portion of the transform to represent the power law.
Parameters
----------
non_linear : bool, optional
Enables clipping of non-linear portions of the transform.
verbose : bool, optional
Enables plotting.
'''
if non_linear:
self.curve1 = clip_to_linear(self.curve1)
self.curve2 = clip_to_linear(self.curve2)
dummy = [0] * len(self.curve1[0, :]) + [1] * len(self.curve2[0, :])
x = np.concatenate((self.curve1[0, :], self.curve2[0, :]))
regressor = x.T * dummy
log_T_g = np.concatenate((self.curve1[1, :], self.curve2[1, :]))
d = {"dummy": Series(dummy), "scales": Series(
x), "log_T_g": Series(log_T_g), "regressor": Series(regressor)}
df = DataFrame(d)
model = sm.ols(formula="log_T_g ~ dummy + scales + regressor", data=df)
self.results = model.fit()
self.distance = np.abs(self.results.tvalues["regressor"])
if verbose:
            print(self.results.summary())
import matplotlib.pyplot as p
p.plot(self.curve1[0, :], self.curve1[1, :], 'bD',
self.curve2[0, :], self.curve2[1, :], 'gD')
p.plot(self.curve1[0, :],
self.results.fittedvalues[:len(self.curve1[1, :])], "b",
self.curve2[0, :],
self.results.fittedvalues[-len(self.curve2[1, :]):], "g")
p.grid(True)
p.xlabel("log a")
p.ylabel(r"log $T_g$")
p.show()
return self
def clip_to_linear(data, threshold=1.0, kernel_width=0.1, ends_clipped=0.05):
'''
Takes the second derivative of the data with a ricker wavelet.
Data is clipped to the linear portion (2nd derivative ~ 0)
Parameters
----------
data : numpy.ndarray
x and y data.
threshold : float, optional
Acceptable value of the second derivative to be called linear.
kernel_width : float, optional
Kernel width set to this percentage of the data length
ends_clipped : float, optional
Percentage of data to clip off at the ends. End points have residual
effects from the convolution.
Returns
-------
data_clipped : numpy.ndarray
Linear portion of the data set returned.
'''
from scipy.signal import ricker
y = data[1, :]
x = data[0, :]
num_pts = len(y)
kernel = ricker(num_pts, num_pts * kernel_width)
sec_deriv = np.convolve(y, kernel, mode="same")
# Ends go back to being ~ linear, so clip them off
if ends_clipped > 0.0:
clipped_pts = int(num_pts * ends_clipped)
sec_deriv = sec_deriv[clipped_pts: num_pts - clipped_pts]
y = y[clipped_pts: num_pts - clipped_pts]
x = x[clipped_pts: num_pts - clipped_pts]
linear_pts = np.abs(sec_deriv) < threshold
data_clipped = np.empty((2, len(y[linear_pts])))
data_clipped[:, :] = x[linear_pts], y[linear_pts]
return data_clipped
def transform(data, imgscale):
'''
    Reduce the wavelet transform output to the log of the mean of its positive
    components at each scale. This reduces the dataset to 1D.
Parameters
----------
data : tuple
Contains N arrays and scales from the transform.
Returns
-------
    data_1D : numpy.ndarray
Scales in the first column and log <T_g> in the second.
'''
wav_arrays = data[0]
scales = data[1]
log_av_T_g = []
for i in range(len(scales)):
average_Tg_i = np.log10(np.abs(wav_arrays[i, :, :]
[wav_arrays[i, :, :] > 0]).mean())
log_av_T_g.append(average_Tg_i)
physical_scales = np.log10(scales * imgscale)
data_1D = np.array([physical_scales, log_av_T_g])
return data_1D
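# Usage sketch (illustrative, not part of the original module): the FITS file
# names below are placeholders, and each dataset is passed as an
# (array, header) pair, which is the form Wavelet_Distance expects.
if __name__ == "__main__":
    from astropy.io import fits
    hdu1 = fits.open("image1.fits")[0]
    hdu2 = fits.open("image2.fits")[0]
    w_dist = Wavelet_Distance((hdu1.data, hdu1.header),
                              (hdu2.data, hdu2.header))
    w_dist.distance_metric(verbose=False)
    print(w_dist.distance)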
| mit |
Obus/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
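# Standalone usage sketch (not part of the test suite): the same
# learning_curve call the tests above exercise, run on a real estimator.
if __name__ == '__main__':
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        PassiveAggressiveClassifier(n_iter=1, shuffle=False), X, y, cv=3,
        train_sizes=np.linspace(0.2, 1.0, 5))
    print(test_scores.mean(axis=1))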
| bsd-3-clause |
dipanjanS/text-analytics-with-python | New-Second-Edition/Ch05 - Text Classification/model_evaluation_utils.py | 2 | 9263 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 20:05:23 2017
@author: DIP
@Copyright: Dipanjan Sarkar
"""
from sklearn import metrics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.metrics import roc_curve, auc
def get_metrics(true_labels, predicted_labels):
print('Accuracy:', np.round(
metrics.accuracy_score(true_labels,
predicted_labels),
4))
print('Precision:', np.round(
metrics.precision_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('Recall:', np.round(
metrics.recall_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('F1 Score:', np.round(
metrics.f1_score(true_labels,
predicted_labels,
average='weighted'),
4))
def train_predict_model(classifier,
train_features, train_labels,
test_features, test_labels):
# build model
classifier.fit(train_features, train_labels)
# predict using model
predictions = classifier.predict(test_features)
return predictions
def display_confusion_matrix(true_labels, predicted_labels, classes=[1,0]):
total_classes = len(classes)
level_labels = [total_classes*[0], list(range(total_classes))]
cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels,
labels=classes)
cm_frame = pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'], classes],
labels=level_labels),
index=pd.MultiIndex(levels=[['Actual:'], classes],
labels=level_labels))
print(cm_frame)
def display_confusion_matrix_pretty(true_labels, predicted_labels, classes=[1,0]):
total_classes = len(classes)
level_labels = [total_classes*[0], list(range(total_classes))]
cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels,
labels=classes)
cm_frame = pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'], classes],
labels=level_labels),
index=pd.MultiIndex(levels=[['Actual:'], classes],
labels=level_labels))
return cm_frame
def display_classification_report(true_labels, predicted_labels, classes=[1,0]):
report = metrics.classification_report(y_true=true_labels,
y_pred=predicted_labels,
labels=classes)
print(report)
def display_model_performance_metrics(true_labels, predicted_labels, classes=[1,0]):
print('Model Performance metrics:')
print('-'*30)
get_metrics(true_labels=true_labels, predicted_labels=predicted_labels)
print('\nModel Classification report:')
print('-'*30)
display_classification_report(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
print('\nPrediction Confusion Matrix:')
print('-'*30)
display_confusion_matrix(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
def plot_model_decision_surface(clf, train_features, train_labels,
plot_step=0.02, cmap=plt.cm.RdYlBu,
markers=None, alphas=None, colors=None):
if train_features.shape[1] != 2:
raise ValueError("X_train should have exactly 2 columnns!")
x_min, x_max = train_features[:, 0].min() - plot_step, train_features[:, 0].max() + plot_step
y_min, y_max = train_features[:, 1].min() - plot_step, train_features[:, 1].max() + plot_step
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
clf_est = clone(clf)
clf_est.fit(train_features,train_labels)
if hasattr(clf_est, 'predict_proba'):
Z = clf_est.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
else:
Z = clf_est.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
le = LabelEncoder()
y_enc = le.fit_transform(train_labels)
n_classes = len(le.classes_)
plot_colors = ''.join(colors) if colors else [None] * n_classes
label_names = le.classes_
markers = markers if markers else [None] * n_classes
alphas = alphas if alphas else [None] * n_classes
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y_enc == i)
plt.scatter(train_features[idx, 0], train_features[idx, 1], c=color,
label=label_names[i], cmap=cmap, edgecolors='black',
marker=markers[i], alpha=alphas[i])
plt.legend()
plt.show()
def plot_model_roc_curve(clf, features, true_labels, label_encoder=None, class_names=None):
## Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
if hasattr(clf, 'classes_'):
class_labels = clf.classes_
elif label_encoder:
class_labels = label_encoder.classes_
elif class_names:
class_labels = class_names
else:
raise ValueError('Unable to derive prediction classes, please specify class_names!')
n_classes = len(class_labels)
y_test = label_binarize(true_labels, classes=class_labels)
if n_classes == 2:
if hasattr(clf, 'predict_proba'):
prob = clf.predict_proba(features)
y_score = prob[:, prob.shape[1]-1]
elif hasattr(clf, 'decision_function'):
prob = clf.decision_function(features)
y_score = prob[:, prob.shape[1]-1]
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = {0:0.2f})'
''.format(roc_auc),
linewidth=2.5)
elif n_classes > 2:
if hasattr(clf, 'predict_proba'):
y_score = clf.predict_proba(features)
elif hasattr(clf, 'decision_function'):
y_score = clf.decision_function(features)
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
## Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
## Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
## Plot ROC curves
plt.figure(figsize=(6, 4))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]), linewidth=3)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]), linewidth=3)
for i, label in enumerate(class_labels):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(label, roc_auc[i]),
linewidth=2, linestyle=':')
else:
        raise ValueError('Number of classes should be at least 2')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc="lower right")
plt.show()
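# Usage sketch (illustrative only; the synthetic data and classifier below are
# assumptions, not part of the original notebook code).
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=200, n_features=20, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                        random_state=42)
    preds = train_predict_model(LogisticRegression(), X_train, y_train,
                                X_test, y_test)
    display_model_performance_metrics(true_labels=y_test,
                                      predicted_labels=preds,
                                      classes=[0, 1])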
| apache-2.0 |
kiyoto/statsmodels | statsmodels/iolib/tests/test_summary.py | 31 | 1535 | '''examples to check summary, not converted to tests yet
'''
from __future__ import print_function
if __name__ == '__main__':
from statsmodels.regression.tests.test_regression import TestOLS
#def mytest():
aregression = TestOLS()
TestOLS.setupClass()
results = aregression.res1
r_summary = str(results.summary_old())
print(r_summary)
olsres = results
print('\n\n')
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
from statsmodels.discrete.tests.test_discrete import TestProbitNewton
aregression = TestProbitNewton()
TestProbitNewton.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
probres = results
from statsmodels.robust.tests.test_rlm import TestHampel
aregression = TestHampel()
#TestHampel.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
rlmres = results
print('\n\n')
from statsmodels.genmod.tests.test_glm import TestGlmBinomial
aregression = TestGlmBinomial()
#TestGlmBinomial.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
#print(results.summary2(return_fmt='latex'))
#print(results.summary2(return_fmt='csv'))
smry = olsres.summary()
print(smry.as_csv())
# import matplotlib.pyplot as plt
# plt.plot(rlmres.model.endog,'o')
# plt.plot(rlmres.fittedvalues,'-')
#
# plt.show() | bsd-3-clause |
jabeerahmed/testrepo | term1/Lessons/LaneDetection/finding_lane_lines_color_region.py | 1 | 2342 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Read in the image and print out some stats
# Note: here we read a .jpg, so pixel values are already on a 0-255 scale
# (a .png would be read in as floats in [0, 1] and need rescaling)
image = mpimg.imread('../images/test.jpg')
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
line_image = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Define the vertices of a triangular mask.
# Keep in mind the origin (x=0, y=0) is in the upper left
# MODIFY THESE VALUES TO ISOLATE THE REGION
# WHERE THE LANE LINES ARE IN THE IMAGE
left_bottom = [0, ysize]
right_bottom = [xsize, ysize]
apex = [xsize/2, ysize/2]
# Perform a linear fit (y=Ax+B) to each of the three sides of the triangle
# np.polyfit returns the coefficients [A, B] of the fit
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)
# Mask pixels below the threshold
color_thresholds = (image[:,:,0] < rgb_threshold[0]) | \
(image[:,:,1] < rgb_threshold[1]) | \
(image[:,:,2] < rgb_threshold[2])
# Find the region inside the lines
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
region_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \
(YY > (XX*fit_right[0] + fit_right[1])) & \
(YY < (XX*fit_bottom[0] + fit_bottom[1]))
# Mask color and region selection
color_select[color_thresholds | ~region_thresholds] = [0, 0, 0]
# Color pixels red where both color and region selections met
line_image[~color_thresholds & region_thresholds] = [255, 0, 0]
# Display the image and show region and color selections
plt.imshow(image)
x = [left_bottom[0], right_bottom[0], apex[0], left_bottom[0]]
y = [left_bottom[1], right_bottom[1], apex[1], left_bottom[1]]
plt.plot(x, y, 'b--', lw=4)
plt.imshow(color_select)
plt.imshow(line_image)
plt.show()
| gpl-2.0 |
pytorn/hackr | hackr/Graphs.py | 1 | 2839 | import matplotlib.pyplot as plt
# Matplotlib is used for plotting.
# The plot1 class plots the given coordinates in different graph styles,
# with one method defined per graph style.
# A method is also provided for saving the figure; you must pass the full
# output path, e.g. /home/username/filename.png on Ubuntu.
# The module is intended to be used interactively.
class plot1():
formatdic = {'-', '.', 'o', '+', 'd'}
colordic = {'Blue': 'b', 'Green': 'g', 'Red': 'r', 'Cyan': 'c', 'Yellow': 'y'}
def scatterplot(self, xcord, ycord, col, path):
'''
        :param xcord: list of x coordinates
        :param ycord: list of y coordinates
        :param col: color name, for example Green, Red, Cyan
        :param path: full path where the figure is saved
'''
plt.scatter(xcord, ycord, color=col, label='Change')
plt.title('Scatter Plot')
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.legend()
        self.savefig(path)
plt.show()
# self.savefig()
def barplot(self, xcord, ycord, col, path):
'''
        :param xcord: list of x coordinates
        :param ycord: list of y coordinates
        :param col: color name, for example Green, Red, Cyan
        :param path: full path where the figure is saved
'''
plt.bar(xcord, ycord, color=col, label='Change')
plt.title('Bar Plot')
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.legend()
        self.savefig(path)
plt.show()
def histplot(self, bin1, values, col, path):
'''
        :param bin1: number of bins (or bin edges) for the histogram
        :param values: list of values to bin
        :param col: color name, for example Green, Red, Cyan
        :param path: full path where the figure is saved
'''
plt.hist(values, bin1, color=col, label='Change')
plt.title('Histogram')
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.legend()
        self.savefig(path)
plt.show()
def lineplot(self, xcord, ycord, col, format1, path):
'''
        :param xcord: list of x coordinates
        :param ycord: list of y coordinates
        :param col: color name, for example Green, Red, Cyan
        :param format1: marker style for points, for example 'd' for diamond,
                        '+' for plus sign, 'o' for circle
        :param path: full path where the figure is saved
'''
plt.plot(xcord, ycord, color=col, label='Change', marker=format1)
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.legend()
        self.savefig(path)
plt.show()
def savefig(self, path):
'''
:param path: It is the full path describing where you want to store fig
'''
plt.savefig(path)
# Test Case
# if __name__ == '__main__':
#     p = plot1()
#     p.scatterplot([1, 2, 3], [4, 5, 6], 'g', '/home/shubhi/c1.png')
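# Additional sketch for the line style (marker 'o'); the output path is a placeholder.
#     p.lineplot([1, 2, 3], [2, 4, 6], 'Red', 'o', '/tmp/lineplot.png')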
| apache-2.0 |
francisc0garcia/autonomous_bicycle | test/convert_csv_files.py | 1 | 1323 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 10:24:07 2017
@author: pach0
"""
import os
from fnmatch import fnmatch
import pandas as pd
root = '/home/pach0/Documents/autonomous_bicycle/code/'
pattern = "*.csv"
filenames = []
files = []
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch(name, pattern):
filenames.append(os.path.join(path, name))
files = [pd.read_csv(f) for f in filenames]
list_columns = []
list_columns.append('.header.stamp.secs')
list_columns.append('.header.stamp.nsecs')
list_columns.append('.orientation.x')
list_columns.append('.orientation.z')
list_columns.append('.orientation.p')
file_0 = pd.read_csv(filenames[0])
file_1 = pd.read_csv(filenames[1])
columns_valid = file_0.columns.values.tolist()
df_columns_valid = pd.DataFrame(columns_valid)
df_columns_all = pd.DataFrame(list_columns)
df_filtered = pd.merge(df_columns_valid, df_columns_all, how='inner')
file_0.shape
file_0 = file_0.filter(items=list(df_filtered.values.flatten()))
file_0.shape
cols = list(df_filtered.values.flatten())
result = pd.merge(file_0[cols], file_1[cols],
how='outer', indicator=True, suffixes=('_x', '_y'),
on=['.header.stamp.secs', '.header.stamp.nsecs'])
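# Illustrative follow-up (output path is a placeholder): persist the merged,
# time-aligned samples for later inspection.
result.to_csv('/tmp/merged_orientation_data.csv', index=False)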
| apache-2.0 |
Clyde-fare/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
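# Usage sketch (illustrative only; relies on this era's sklearn API, e.g. the
# deprecated cross_validation module referenced in the docstring above). The
# relative imports mean it must be run as ``python -m sklearn.metrics.scorer``.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.cross_validation import cross_val_score
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=100, random_state=0)
    auc_scorer = get_scorer('roc_auc')
    scores = cross_val_score(LogisticRegression(), X, y, scoring=auc_scorer)
    print(scores.mean())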
| bsd-3-clause |
daler/Pharmacogenomics_Prediction_Pipeline_P3 | doc/custom_dot.py | 4 | 3743 | #!/usr/bin/env python
"""
This script is used to create color-coded sub-DAGs for the documentation. It
is intended to be run from the "doc" directory, and can be triggered by running
the Makefile target "dags".
The "Snakefile" is run with each set of targets defined by the
config.yaml file to create a DAG just for that feature snakefile.
Each rule can therefore show up in multiple DAGs. The `color_lookup` dict
manages what the colors should be.
After the dot-format DAG is created, we do some post-processing to fix colors,
change the shape, etc. Then it's saved to the source/images dir as PDF and PNG.
"""
import pydot
from collections import defaultdict
import yaml
import os
from matplotlib import colors
def color(s):
"""
Convert hex color to space-separated RGB in the range [0-1], as needed by
the `dot` graph layout program.
"""
rgb = colors.ColorConverter().to_rgb(s)
return '"{0} {1} {2}"'.format(*rgb)
# Keys are rule names, colors are anything matplotlib can support (usually hex)
color_lookup = {
'make_lookups': '#6666ff',
'transcript_variant_matrix': '#0066cc',
'transcript_variant_matrix_to_gene_variant_matrix': '#0066cc',
'rnaseq_counts_matrix': "#753b00",
'rnaseq_data_prep': "#753b00",
'compute_zscores': '#cc6600',
'seg_to_bed': '#4c9900',
'multi_intersect': '#4c9900',
'create_cluster_scores': '#4c9900',
'cluster_matrix': '#4c9900',
'create_gene_scores': '#4c9900',
'gene_max_scores_matrix': '#4c9900',
'gene_longest_overlap_scores_matrix': '#4c9900',
}
# This script lives in docs/, so we need to go up one to find the config.
config = yaml.load(open('../config.yaml'))
prefix = config['prefix']
# We'll be iterating through sub-workflows defined in the config, so add the
# main Snakefile as well. The target is the "all_features" rule -- this
# gets us the DAG for the entire combined workflow.
config['features']['all'] = dict(
snakefile='Snakefile', targets='all_features')
for k, v in config['features'].items():
snakefile = v['snakefile']
# The value of output in config.yaml can be a string or dict; convert
# either into a list we can work with
targets = v.get('output', '')
if isinstance(targets, dict):
targets = targets.values()
else:
targets = [targets]
# Fill in the "prefix" from config.yaml
targets = [i.format(prefix=prefix) for i in targets]
# Note we're doing a "cd .." in the subshell to make sure the snakefile
# runs correctly.
cmd = [
'cd .. &&', 'snakemake',
'--rulegraph',
'-s', 'Snakefile']
cmd.extend(targets)
# destination is relative to `..` when within the subshell...
cmd.append('> doc/source/images/%s.dot' % k)
print ' '.join(cmd)
os.system(' '.join(cmd))
# ...but after it's created, we read it from relative to this script.
d = pydot.dot_parser.parse_dot_data(
open('source/images/%s.dot' % k).read())
# Modify attributes
for key, val in d.obj_dict['nodes'].items():
try:
label = val[0]['attributes']['label']
label = label.replace('"', '')
if label in color_lookup:
val[0]['attributes']['color'] = color_lookup[label]
else:
val[0]['attributes']['color'] = color("#888888")
del val[0]['attributes']['style']
except KeyError:
continue
# Gets rid of "rounded" style
del d.obj_dict['nodes']['node'][0]['attributes']['style']
# Optionally lay out the graph from left-to-right
# d.obj_dict['attributes']['rankdir'] = '"LR"'
d.write_pdf('source/images/%s_dag.pdf' % k)
d.write_png('source/images/%s_dag.png' % k)
| cc0-1.0 |
ankurankan/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 41 | 4827 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
craffel/mir_eval | tests/mpl_ic.py | 3 | 12171 | # CREATED:2015-02-17 14:41:28 by Brian McFee <brian.mcfee@nyu.edu>
# this function is lifted wholesale from matploblib v1.4.2,
# and modified so that images are stored explicitly under the tests path
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib.units
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailure
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None):
"""
    Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err, known_exception_class):
# This is not the expected exception
raise
                    # (Keep the next ultra-long comment so it shows in
# console.)
# An error here when running nose means that you don't have
# the matplotlib.testing.noseclasses:KnownFailure plugin in
# use.
raise KnownFailure(msg)
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
def _do_cleanup(original_units_registry):
plt.close('all')
gc.collect()
import matplotlib.testing
matplotlib.testing.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(original_units_registry)
warnings.resetwarnings() # reset any warning filters set in tests
class KnownFailureDidNotFailTest(KnownFailure):
pass
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
_do_cleanup(cls.original_units_registry)
def test(self):
self._func()
class CleanupTestCase(unittest.TestCase):
'''A wrapper for unittest.TestCase that includes cleanup operations'''
@classmethod
def setUpClass(cls):
import matplotlib.units
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def tearDownClass(cls):
_do_cleanup(cls.original_units_registry)
def cleanup(func):
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
try:
func(*args, **kwargs)
finally:
_do_cleanup(original_units_registry)
return wrapped_function
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, six.string_types):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
try:
ax.zaxis.set_major_formatter(ticker.NullFormatter())
ax.zaxis.set_minor_formatter(ticker.NullFormatter())
except AttributeError:
pass
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
for extension in self._extensions:
will_fail = extension not in comparable_formats()
if will_fail:
fail_msg = ('Cannot compare %s files on this system' %
extension)
else:
fail_msg = 'No failure expected'
orig_expected_fname = (
os.path.join(baseline_dir, baseline) + '.' + extension)
if (extension == 'eps' and
not os.path.exists(orig_expected_fname)):
orig_expected_fname = (
os.path.join(baseline_dir, baseline) + '.pdf')
expected_fname = make_test_filename(os.path.join(
result_dir,
os.path.basename(orig_expected_fname)), 'expected')
actual_fname = (
os.path.join(result_dir, baseline) + '.' + extension)
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
figure = plt.figure(fignum)
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname, **self._savefig_kwarg)
plt.close(figure)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s'
' (RMS %(rms).3f)' % err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailure(
"Mismatched version of freetype. Test "
"requires '%s', you have '%s'" %
(self._freetype_version,
ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
freetype_version=None, remove_text=False,
savefig_kwarg=None):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
    Compare images generated by the test with those specified in
    *baseline_images*, which must correspond; otherwise an
    ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 13)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
*savefig_kwarg*: dict
Optional arguments that are passed to the savefig method.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
# default no kwargs to savefig
savefig_kwarg = dict()
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = str('_private')
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text,
'_savefig_kwarg': savefig_kwarg})
return new_class
return compare_images_decorator
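# Hedged usage sketch (illustrative only; 'sine_wave' and the plotting code are
# not part of this module):
#
#     @image_comparison(baseline_images=['sine_wave'], extensions=['png'],
#                       remove_text=True)
#     def test_sine_wave():
#         fig, ax = plt.subplots()
#         x = np.linspace(0, 2 * np.pi, 100)
#         ax.plot(x, np.sin(x))
#
# Each figure left open by the test is saved once per extension and compared,
# within `tol`, against the stored baseline found under
# baseline_images/<module>/ by _image_directories below.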
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
# mods = module_name.split('.')
# mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
# assert mods.pop(0) == 'tests'
# subdir = os.path.join(*mods)
subdir = module_name
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
try:
res = file, path, _ = imp.find_module(sub_mod, path)
path = [path]
if file is not None:
file.close()
except ImportError:
# assume namespace package
path = sys.modules[sub_mod].__path__
res = None, path, None
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
| mit |
yask123/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
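# Hedged sanity-check sketch (not part of the original module): on random data
# the NIPALS inner loop and the direct SVD of X'Y should return (nearly)
# collinear first weight vectors, up to an arbitrary sign.
def _check_nipals_against_svd(n=30, p=5, q=4, seed=0):
    """Return |cos(angle)| between the NIPALS and SVD x-weight vectors."""
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, p), rng.randn(n, q)
    x_w_nipals, y_w_nipals, _ = _nipals_twoblocks_inner_loop(
        X, Y, mode="A", norm_y_weights=True)
    x_w_svd, y_w_svd = _svd_cross_product(X, Y)
    # Both vectors have unit norm, so their dot product is the cosine.
    return np.abs(np.dot(x_w_nipals.ravel(), x_w_svd.ravel()))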
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow obtaining a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid allocating a data chunk for the rank-one approximations
            # matrix which is then subtracted from Xk, we suggest performing a
            # column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical, symmetric version of the PLS
    regression, which is slightly different from CCA. It is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly
    implement the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
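# Hedged usage sketch (illustrative only; the toy data mirrors the doctests in
# PLSRegression/PLSCanonical above):
#
#     from sklearn.cross_decomposition import PLSSVD
#     X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
#     Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
#     plssvd = PLSSVD(n_components=2).fit(X, Y)
#     X_scores, Y_scores = plssvd.transform(X, Y)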
| bsd-3-clause |
RuthAngus/LSST-max | code/clusters.py | 1 | 7180 | # coding: utf-8
# # Recovering rotation periods in simulated LSST data
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from gatspy.periodic import LombScargle
from toy_simulator import simulate_LSST
import simple_gyro as sg
import pandas as pd
import sys
def find_nearest(array, value):
"""
Match a period to a bin.
array: array of bin heights.
value: the period of the star.
    Returns the nearest bin value and the boolean mask that selects it.
"""
m = np.abs(array-value) == np.abs(array-value).min()
return array[m], m
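# Hedged example (illustrative numbers): find_nearest(np.array([1., 2., 4.]), 2.2)
# returns (array([2.]), array([False, True, False])) -- the closest bin value and
# a boolean mask selecting it.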
def assign_amps(ps, log10P, log10R, stdR):
"""
Take periods and bin values and return an array of amplitudes.
"""
npi = np.array([find_nearest(10**log10P, p) for p in ps])
nearest_ps, inds = npi[:, 0], npi[:, 1]
log_ranges = np.array([log10R[i] for i in inds])[:, 0]
std_ranges = np.array([stdR[i] for i in inds])[:, 0]
return np.random.randn(len(ps))*std_ranges + log_ranges
def make_arrays(data, temp_bin, ps, teff, rmag):
"""
Amplitude arrays for each temperature bin
"""
P, R, std = np.array(data["log10P"]), np.array(data["log10R"]), \
np.array(data["stdR"])
if temp_bin == 3500:
m = teff < 3750
elif temp_bin == 6000:
m = teff > 6000
else:
m = (temp_bin - 250 < teff) * (teff < temp_bin + 250)
periods, teffs, rmags = ps[m], teff[m], rmag[m]
amplitudes = assign_amps(periods, P, R, std)
return periods, amplitudes, teffs, rmags
def LSST_sig(m):
"""
Approximate the noise in figure 2 of arxiv:1603.06638 from the apparent
r-mag.
Returns the noise in magnitudes and ppm.
"""
if m < 19:
return .005
mags = np.array([19, 20, 21, 22, 23, 24, 25])
sigs = np.array([.005, .007, .01, .02, .03, .1, .2])
return sigs[np.abs(mags - m) == np.abs(mags-m).min()][0]
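# Hedged examples (values read off the table above): LSST_sig(18.5) -> 0.005
# (brighter than r = 19), LSST_sig(21.4) -> 0.01 (nearest tabulated magnitude
# is 21) and LSST_sig(24.0) -> 0.1.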
def pgram(N, years, fname):
ps = np.linspace(2, 100, 1000) # the period array (in days)
print("Computing periodograms")
# Now compute LS pgrams for a set of LSST light curves & save highest peak
ids = np.arange(N)
    periods = np.zeros_like(ids, dtype=float)  # keep float periods (ids is an int array)
for i, id in enumerate(ids):
sid = str(int(id)).zfill(4)
x, y, yerr = np.genfromtxt("simulations/{0}/{1}.txt".format(fname,
sid)).T
m = x < years * 365.25
        xt, yt, yerrt = x[m], y[m], yerr[m]  # apply the mask to each array once
model = LombScargle().fit(xt, yt, yerrt) # compute pgram
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([j for j in range(1, len(ps)-1) if pgram[j-1]
< pgram[j] and pgram[j+1] < pgram[j]])
if len(peaks):
period = ps[pgram == max(pgram[peaks])][0]
else:
period = 0
periods[i] = period
np.savetxt("results/{0}/{1}_{2}yr_result.txt".format(fname, sid,
years), [period])
np.savetxt("{0}_{1}yr_results.txt".format(fname, years), periods.T)
return periods
def inject(fname):
"""
Simulate rotation periods for LSST targets and attempt to recover those
rotation periods.
Saves an array of injected periods (days), recovered periods (days), Teff,
rmag, injected amplitudes (ppm) and noise (ppm).
'true_ps, periods, logamps, teffs, rmags, true_as, noises_ppm'
"""
print("Loading Cluster file...")
# Randomly select targets from a TRILEGAL output.
logAges, bvs, logTeff, rmag = np.genfromtxt("{0}.dat".format(fname)).T
teff = 10**logTeff
# Calculate periods from ages and colours for cool stars
m = bvs > .4 # select only cool stars
cool_ages = 10**logAges[m] * 1e-9
cool_ps = sg.period(cool_ages, bvs[m])
cool_teffs = teff[m]
cool_rmags = rmag[m]
# Draw from a sum of two Gaussians (modelled in another notebook) that
# describes the period distribution for hot stars. Approximations:
# I have lumped all stars with colour < 0.4 in together AND I actually
# used teff = 6250, not B-V = 0.4 in the other notebook.
hot_ages = 10**logAges[~m] * 1e-9 # select hot stars
hot_teffs = teff[~m]
hot_rmags = rmag[~m]
# copy parameters for two Gaussians from hot_stars ipython notebook
A1, A2, mu1, mu2, sig1, sig2 = 254.11651209, 49.8149765, 3.00751724, \
3.73399554, 2.26525979, 8.31739725
hot_ps = np.zeros_like(hot_ages)
hot_ps1 = np.random.randn(int(len(hot_ages)*(1 - A2/A1)))*sig1 + mu1
hot_ps2 = np.random.randn(int(len(hot_ages)*(A2/A1)))*sig2 + mu2
hot_ps[:len(hot_ps1)] = hot_ps1
hot_ps[len(hot_ps1):len(hot_ps1) + len(hot_ps2)] = hot_ps2
tot = len(hot_ps1) + len(hot_ps2)
hot_ps[tot:] = np.random.randn(len(hot_ps)-tot)*sig2 + mu2
# combine the modes
age = np.concatenate((cool_ages, hot_ages))
ps = np.concatenate((cool_ps, hot_ps))
teff = np.concatenate((cool_teffs, hot_teffs))
rmag = np.concatenate((cool_rmags, hot_rmags))
print("Calculating amplitudes...")
# Use Derek's results to calculate amplitudes
# Column headings: log10P, log10R, stdR, Nbin
d35 = pd.read_csv("data/rot_v_act3500.txt")
d40 = pd.read_csv("data/rot_v_act4000.txt")
d45 = pd.read_csv("data/rot_v_act4500.txt")
d50 = pd.read_csv("data/rot_v_act5000.txt")
d55 = pd.read_csv("data/rot_v_act5500.txt")
d60 = pd.read_csv("data/rot_v_act6000.txt")
# Assign amplitudes
pers, logamps, teffs, rmags = \
np.concatenate((make_arrays(d35, 3500, ps, teff, rmag),
make_arrays(d40, 4000, ps, teff, rmag),
make_arrays(d45, 4500, ps, teff, rmag),
make_arrays(d50, 5000, ps, teff, rmag),
make_arrays(d55, 5500, ps, teff, rmag)),
axis=1)
# make_arrays(d60, 6000, ps, teff, rmag)),
amps = 10**logamps # parts per million
noises_mag = np.array([LSST_sig(mag) for mag in rmags])
noises_ppm = (1 - 10**(-noises_mag/2.5)) * 1e6
# Simulate light curves
print("Simulating light curves...")
path = "simulations/{0}".format(fname) # where to save the lcs
[simulate_LSST(i, pers[i], amps[i], path, noises_ppm[i]) for i in
range(len(pers))]
# save the true values
ids = np.arange(len(pers))
data = np.vstack((ids, pers, amps))
np.savetxt("{0}/truth.txt".format(path), data.T)
print("Saving results")
data = np.vstack((pers, amps, teffs, rmags, noises_ppm))
np.savetxt("parameters_{0}.txt".format(fname), data.T)
return pers, amps, teffs, rmags, noises_ppm
if __name__ == "__main__":
fname = "{0}".format(sys.argv[1])
# Run simlations
pers, amps, teffs, rmags, noises_ppm = inject("{0}".format(fname))
# recover periods
pers, amps, teffs, rmags, noises_ppm = \
np.genfromtxt("parameters_{0}.txt".format(fname)).T
N = len(pers)
years = [1, 5, 10]
for year in years:
periods = pgram(N, year, fname)
data = np.vstack((pers, periods, np.log(amps), teffs, rmags, amps,
noises_ppm))
np.savetxt("{0}yr_results{1}.txt".format(year, fname), data.T)
| mit |
debsankha/networkx | examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
    # This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
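# Hedged note (inferred from the code above): create_hc returns a list of
# node-index lists, e.g. [[0, 3, 7], [1, 2], ...], one list per cluster obtained
# by cutting the complete-linkage dendrogram at t=1.15; this is the partition
# format nx.blockmodel expects below.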
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H=nx.connected_component_subgraphs(G)[0]
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/datasets/randhie/data.py | 3 | 2604 | """RAND Health Insurance Experiment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is in the public domain."""
TITLE = __doc__
SOURCE = """
The data was collected by the RAND corporation as part of the Health
Insurance Experiment (HIE).
http://www.rand.org/health/projects/hie/
This data was used in::
Cameron, A.C. amd Trivedi, P.K. 2005. `Microeconometrics: Methods
and Applications,` Cambridge: New York.
And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html>
See randhie/src for the original data and description. The data included
here contains only a subset of the original data. The data varies slightly
compared to that reported in Cameron and Trivedi.
"""
DESCRSHORT = """The RAND Co. Health Insurance Experiment Data"""
DESCRLONG = """"""
NOTE = """
Number of observations - 20,190
Number of variables - 10
Variable name definitions::
mdvis - Number of outpatient visits to an MD
    lncoins - ln(coinsurance + 1), 0 <= coinsurance <= 100
idp - 1 if individual deductible plan, 0 otherwise
lpi - ln(max(1, annual participation incentive payment))
fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise
physlm - 1 if the person has a physical limitation
disea - number of chronic diseases
hlthg - 1 if self-rated health is good
hlthf - 1 if self-rated health is fair
hlthp - 1 if self-rated health is poor
(Omitted category is excellent self-rated health)
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv')
def load():
"""
Loads the RAND HIE data and returns a Dataset class.
    endog - response variable, mdvis
    exog - design
    Returns
    -------
    Load instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Loads the RAND HIE data and returns a Dataset class.
    endog - response variable, mdvis
    exog - design
    Returns
    -------
    Load instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
from pandas import read_csv
data = read_csv(PATH)
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(PATH, "rb"), delimiter=",", names=True, dtype=float)
return data
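# Hedged usage sketch (illustrative only):
#
#     from statsmodels.datasets import randhie
#     data = randhie.load_pandas()
#     y, X = data.endog, data.exog   # mdvis counts and the remaining 9 columns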
| bsd-3-clause |
cagatayyildiz/boun-sim | bcpm/visualize.py | 1 | 1414 | '''
This study is a Bogazici University - NETAS Nova V-Gate collaboration, funded under the TEYDEB project "Realization of Anomaly Detection and Prevention with Learning System Architectures, Quality Improvement, High Rate Service Availability and Rich Services in a VoIP Firewall Product" by the Scientific and Technological Research Council of Turkey (TUBITAK).
'''
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import sys
from model import Data
def visualize_data(dirname, m, n):
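    """Plot the first m dimensions of the data as a heatmap and the next n as
    separate time series, with change points overlaid as red vertical lines.
    Hedged description inferred from the plotting code below (not from the
    original authors): `dirname` is the directory read by Data.load, `m` is the
    number of rows shown in the pcolormesh panel, `n` is the number of rows
    shown as individual line plots, and `data.s` marks the change points.
    """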
data = Data.load(dirname)
v = data.v.transpose()
t = v.shape[1]
print(t)
if m > 0:
fig = plt.figure(figsize=(12, 4))
ax = fig.gca()
ax.pcolormesh(v[0:m, :], cmap=plt.cm.Greys)
ax.vlines(np.arange(0, t), 0, data.s * m, colors='r', linestyles='-', linewidth=2)
ax.legend(['change points'])
if n > 0:
fig = plt.figure(figsize=(12, 4))
gs = gridspec.GridSpec(n, 1, height_ratios=np.ones(n))
for i in range(n):
ax = plt.subplot(gs[i])
y = v[m + i, :]
y_lim_max = np.max(y) * 1.1
ax.plot(range(t), y, 'b-')
ax.vlines(np.arange(0, t), 0, data.s * y_lim_max, colors='r', linestyles='-', linewidth=2)
ax.set_ylim([0, y_lim_max])
plt.show()
if __name__ == '__main__':
visualize_data(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
| gpl-2.0 |
JPFrancoia/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
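# Hedged examples: cmp_version('0.8.1', '0.9') == -1, cmp_version('0.9', '0.9') == 0
# and cmp_version('1.10', '0.9') == 1; only the first two version components are
# compared.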
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
WaldurChatbot/Waldur-Chatbot | common/offline_graphs/totalcosts_offline.py | 1 | 16907 | import json
import collections
import matplotlib
matplotlib.use('Agg') # requirement of matplotlib
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
myinput = """[
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2018,
"month":1,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/9e67980771a94de3bd0075fe84522b05/",
"uuid":"9e67980771a94de3bd0075fe84522b05",
"number":100151,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/59fd12a0d3e34f829d6a0eefd2e5ee41/",
"uuid":"59fd12a0d3e34f829d6a0eefd2e5ee41",
"number":100156,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"pending",
"year":2017,
"month":12,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":null,
"due_date":null,
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-12-01T00:00:00Z",
"end":"2017-12-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/bb6f38e908e7493791c65b26e88e1619/",
"uuid":"bb6f38e908e7493791c65b26e88e1619",
"number":100121,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"84.9000000",
"tax":"0.0000000",
"total":"84.9000000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":84.9,
"tax":"0.0000000",
"total":"84.9000000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-01T00:00:00Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":30,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/d13cdd4ef4d2478e8e0cf0961d20e6f2/",
"uuid":"d13cdd4ef4d2478e8e0cf0961d20e6f2",
"number":100129,
"customer":"https://api.etais.ee/api/customers/0d689685ab3444bbb592338e24613f03/",
"price":"53.7700000",
"tax":"0.0000000",
"total":"53.7700000",
"state":"created",
"year":2017,
"month":11,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-12-01",
"due_date":"2017-12-31",
"customer_details":null,
"openstack_items":[
{
"name":"Waldur Maie cloud (Small / Generic)",
"price":53.77,
"tax":"0.0000000",
"total":"53.7700000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-11-12T11:29:21.522230Z",
"end":"2017-11-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"W-M project",
"project_uuid":"26fc83e64ea0473fb9f57f0ae978b396",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"package":"https://api.etais.ee/api/openstack-packages/81e93543103b4cf8a5d3658e026e98f3/",
"tenant_name":"Waldur Maie cloud",
"tenant_uuid":"1571bca1f6594ad3bede4d2c8d64755a",
"usage_days":19,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b094173f50a848e19d3362c84eabebc4/",
"uuid":"b094173f50a848e19d3362c84eabebc4",
"number":100096,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"87.7300000",
"tax":"0.0000000",
"total":"87.7300000",
"state":"created",
"year":2017,
"month":10,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-11-01",
"due_date":"2017-12-01",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":87.73,
"tax":"0.0000000",
"total":"87.7300000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-10-01T00:00:00Z",
"end":"2017-10-31T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":31,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
},
{
"url":"https://api.etais.ee/api/invoices/b636ee1236e0486994cdd1ffda4c7e1d/",
"uuid":"b636ee1236e0486994cdd1ffda4c7e1d",
"number":100076,
"customer":"https://api.etais.ee/api/customers/5991d0c109df4e8cab4f9dd660295517/",
"price":"11.3200000",
"tax":"0.0000000",
"total":"11.3200000",
"state":"created",
"year":2017,
"month":9,
"issuer_details":{
"phone":{
"national_number":"5555555",
"country_code":"372"
},
"account":"123456789",
"country_code":"EE",
"address":"Lille 4-205",
"country":"Estonia",
"company":"OpenNode",
"postal":"80041",
"vat_code":"EE123456789",
"email":"info@opennodecloud.com",
"bank":"Estonian Bank"
},
"invoice_date":"2017-10-01",
"due_date":"2017-10-31",
"customer_details":null,
"openstack_items":[
{
"name":"WaldurChatbot (Small / Generic)",
"price":11.32,
"tax":"0.0000000",
"total":"11.3200000",
"unit_price":"2.8300000",
"unit":"day",
"start":"2017-09-27T13:53:31.425080Z",
"end":"2017-09-30T23:59:59.999999Z",
"product_code":"",
"article_code":"",
"project_name":"Waldur Chatbot testbed",
"project_uuid":"88879e68a4c84f6ea0e05fb9bc59ea8f",
"scope_type":"OpenStack.Tenant",
"scope_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"package":"https://api.etais.ee/api/openstack-packages/517047bdfefe418899c981663f1ea5f5/",
"tenant_name":"WaldurChatbot",
"tenant_uuid":"ed505f9ebd8c491b94c6f8dfc30b54b0",
"usage_days":4,
"template_name":"Generic",
"template_uuid":"a85daef727d344b3858541e4bc29a274",
"template_category":"Small"
}
],
"offering_items":[
],
"generic_items":[
]
}
]"""
data = json.loads(myinput)
num_to_monthdict = {
1:'Jan',
2:'Feb',
3:'Mar',
4:'Apr',
5:'May',
6:'Jun',
7:'Jul',
8:'Aug',
9:'Sep',
10:'Oct',
11:'Nov',
12:'Dec'
}
plotx = []
ploty = []
uuid = '5991d0c109df4e8cab4f9dd660295517'
customer = 'https://api.etais.ee/api/customers/' + uuid + '/'
newlist = []
print(type(data))
print(type(data[0]))
for i in range((len(data)-1), -1, -1):
if data[i]['customer'] == customer:
newlist.append(data[i])
plotx.append(num_to_monthdict[data[i]['month']] + " " + str(data[i]['year']))
ploty.append(float(data[i]['total']))
print("### " + str(len(newlist)))
'''
result = collections.OrderedDict()
for i in range(len(plotx)):
result[plotx[i]] = float(ploty[i])
'''
print(plotx)
print(ploty)
N = len(ploty)
ind = np.arange(N)
width = 0.35
fig, ax = plt.subplots()
rects1 = ax.bar(ind, ploty, width, color='#75ad58')
ax.set_xlabel('Months')
ax.set_ylabel('Total costs')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(plotx)
title = ax.set_title("\n".join(wrap('Last ' + str(N) + ' month total costs but then every time the title gets longer '
                                    'omg like wtf when does it stop OMG HELP well okay '
                                    'let me tell you a story all about how '
                                    'my life got turned upside down '
                                    'so id like to take a moment just sit right there', 60)))
def autolabel(rects, ax):
# Get y-axis height to calculate label position from.
(y_bottom, y_top) = ax.get_ylim()
y_height = y_top - y_bottom
for rect in rects:
height = rect.get_height()
label_position = height + (y_height * 0.01)
ax.text(rect.get_x() + rect.get_width()/2., label_position,
'%d' % int(height),
ha='center', va='bottom')
autolabel(rects1, ax)
print()
counter = 1
for child in ax.get_children():
if counter == N:
child.set_color('#2388d6')
print("HERE:" + str(child))
else:
print(child)
counter += 1
real_invoice = matplotlib.patches.Patch(color='#75ad58', label='Invoice')
estimate_invoice = matplotlib.patches.Patch(color='#2388d6', label='Estimation')
plt.legend(handles=[real_invoice, estimate_invoice])
fig.tight_layout()
title.set_y(1.05)
fig.subplots_adjust(top=0.8)
#plt.show()
fig.savefig('foo.png')
| mit |
aloverso/SoftwareSystems | hw04/wave3/thinkdsp.py | 23 | 31996 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import array
import math
import numpy
import random
import scipy
import scipy.stats
import struct
import subprocess
import thinkplot
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
PI2 = math.pi * 2
def random_seed(x):
"""Initialize the random and numpy.random generators.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class WavFileWriter(object):
"""Writes wav files."""
def __init__(self, filename='sound.wav', framerate=11025):
"""Opens the file and sets parameters.
filename: string
framerate: samples per second
"""
self.filename = filename
self.framerate = framerate
self.nchannels = 1
self.sampwidth = 2
self.bits = self.sampwidth * 8
self.bound = 2**(self.bits-1) - 1
self.fmt = 'h'
self.dtype = numpy.int16
self.fp = open_wave(self.filename, 'w')
self.fp.setnchannels(self.nchannels)
self.fp.setsampwidth(self.sampwidth)
self.fp.setframerate(self.framerate)
def write(self, wave):
"""Writes a wave.
wave: Wave
"""
zs = wave.quantize(self.bound, self.dtype)
self.fp.writeframes(zs.tostring())
def close(self, duration=0):
"""Closes the file.
duration: how many seconds of silence to append
"""
if duration:
self.write(rest(duration))
self.fp.close()
def read_wave(filename='sound.wav'):
"""Reads a wave file.
filename: string
returns: Wave
"""
fp = open_wave(filename, 'r')
nchannels = fp.getnchannels()
nframes = fp.getnframes()
sampwidth = fp.getsampwidth()
framerate = fp.getframerate()
z_str = fp.readframes(nframes)
fp.close()
dtype_map = {1:numpy.int8, 2:numpy.int16}
assert sampwidth in dtype_map
ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])
wave = Wave(ys, framerate)
return wave
def play_wave(filename='sound.wav', player='aplay'):
"""Plays a wave file.
filename: string
player: string name of executable that plays wav files
"""
cmd = '%s %s' % (player, filename)
popen = subprocess.Popen(cmd, shell=True)
popen.communicate()
class _SpectrumParent(object):
"""Contains code common to Spectrum and DCT.
"""
@property
def max_freq(self):
return self.framerate / 2.0
@property
def freq_res(self):
return self.max_freq / (len(self.fs) - 1)
def plot(self, low=0, high=None, **options):
"""Plots amplitude vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
def plot_power(self, low=0, high=None, **options):
"""Plots power vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
def estimate_slope(self):
"""Runs linear regression on log power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
x = numpy.log(self.fs[1:])
y = numpy.log(self.power[1:])
t = scipy.stats.linregress(x,y)
return t
def peaks(self):
"""Finds the highest peaks and their frequencies.
returns: sorted list of (amplitude, frequency) pairs
"""
t = zip(self.amps, self.fs)
t.sort(reverse=True)
return t
class Spectrum(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, hs, framerate):
self.hs = hs
self.framerate = framerate
n = len(hs)
self.fs = numpy.linspace(0, self.max_freq, n)
def __add__(self, other):
if other == 0:
return self
assert self.framerate == other.framerate
hs = self.hs + other.hs
return Spectrum(hs, self.framerate)
__radd__ = __add__
@property
def real(self):
"""Returns the real part of the hs (read-only property)."""
return numpy.real(self.hs)
@property
def imag(self):
"""Returns the imaginary part of the hs (read-only property)."""
return numpy.imag(self.hs)
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return numpy.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def low_pass(self, cutoff, factor=0):
"""Attenuate frequencies above the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] > cutoff:
self.hs[i] *= factor
def high_pass(self, cutoff, factor=0):
"""Attenuate frequencies below the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] < cutoff:
self.hs[i] *= factor
def band_stop(self, low_cutoff, high_cutoff, factor=0):
"""Attenuate frequencies between the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if low_cutoff < self.fs[i] < high_cutoff:
self.hs[i] = 0
def pink_filter(self, beta=1):
"""Apply a filter that would make white noise pink.
beta: exponent of the pink noise
"""
denom = self.fs ** (beta/2.0)
denom[0] = 1
self.hs /= denom
def angles(self, i):
"""Computes phase angles in radians.
returns: list of phase angles
"""
return numpy.angle(self.hs)
def make_integrated_spectrum(self):
"""Makes an integrated spectrum.
"""
cs = numpy.cumsum(self.power)
cs /= cs[-1]
return IntegratedSpectrum(cs, self.fs)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = numpy.fft.irfft(self.hs)
return Wave(ys, self.framerate)
class IntegratedSpectrum(object):
"""Represents the integral of a spectrum."""
def __init__(self, cs, fs):
"""Initializes an integrated spectrum:
cs: sequence of cumulative amplitudes
fs: sequence of frequences
"""
self.cs = cs
self.fs = fs
def plot_power(self, low=0, high=None, expo=False, **options):
"""Plots the integrated spectrum.
low: int index to start at
high: int index to end at
"""
cs = self.cs[low:high]
fs = self.fs[low:high]
if expo:
cs = numpy.exp(cs)
thinkplot.Plot(fs, cs, **options)
def estimate_slope(self, low=1, high=-12000):
"""Runs linear regression on log cumulative power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
#print self.fs[low:high]
#print self.cs[low:high]
x = numpy.log(self.fs[low:high])
y = numpy.log(self.cs[low:high])
t = scipy.stats.linregress(x,y)
return t
class Dct(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, amps, framerate):
self.amps = amps
self.framerate = framerate
n = len(amps)
self.fs = numpy.arange(n) / float(n) * self.max_freq
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = scipy.fftpack.dct(self.amps, type=3) / 2
return Wave(ys, self.framerate)
class Spectrogram(object):
"""Represents the spectrum of a signal."""
def __init__(self, spec_map, seg_length, window_func=None):
"""Initialize the spectrogram.
spec_map: map from float time to Spectrum
seg_length: number of samples in each segment
window_func: function that computes the window
"""
self.spec_map = spec_map
self.seg_length = seg_length
self.window_func = window_func
def any_spectrum(self):
"""Returns an arbitrary spectrum from the spectrogram."""
return self.spec_map.itervalues().next()
@property
def time_res(self):
"""Time resolution in seconds."""
spectrum = self.any_spectrum()
return float(self.seg_length) / spectrum.framerate
@property
def freq_res(self):
"""Frequency resolution in Hz."""
return self.any_spectrum().freq_res
def times(self):
"""Sorted sequence of times.
returns: sequence of float times in seconds
"""
ts = sorted(self.spec_map.iterkeys())
return ts
def frequencies(self):
"""Sequence of frequencies.
returns: sequence of float freqencies in Hz.
"""
fs = self.any_spectrum().fs
return fs
def plot(self, low=0, high=None, **options):
"""Make a pseudocolor plot.
low: index of the lowest frequency component to plot
high: index of the highest frequency component to plot
"""
ts = self.times()
fs = self.frequencies()[low:high]
# make the array
size = len(fs), len(ts)
array = numpy.zeros(size, dtype=numpy.float)
# copy amplitude from each spectrum into a column of the array
for i, t in enumerate(ts):
spectrum = self.spec_map[t]
array[:,i] = spectrum.amps[low:high]
thinkplot.pcolor(ts, fs, array, **options)
def make_wave(self):
"""Inverts the spectrogram and returns a Wave.
returns: Wave
"""
res = []
for t, spectrum in sorted(self.spec_map.iteritems()):
wave = spectrum.make_wave()
n = len(wave)
if self.window_func:
window = 1 / self.window_func(n)
wave.window(window)
i = int(round(t * wave.framerate))
start = i - n / 2
end = start + n
res.append((start, end, wave))
starts, ends, waves = zip(*res)
low = min(starts)
high = max(ends)
ys = numpy.zeros(high-low, numpy.float)
for start, end, wave in res:
ys[start:end] = wave.ys
return Wave(ys, wave.framerate)
class Wave(object):
"""Represents a discrete-time waveform.
Note: the ys attribute is a "wave array" which is a numpy
array of floats.
"""
def __init__(self, ys, framerate, start=0):
"""Initializes the wave.
ys: wave array
framerate: samples per second
"""
self.ys = ys
self.framerate = framerate
self.start = start
def __len__(self):
return len(self.ys)
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / float(self.framerate)
def __or__(self, other):
"""Concatenates two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave.__or__: framerates do not agree')
ys = numpy.concatenate((self.ys, other.ys))
return Wave(ys, self.framerate)
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def apodize(self, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
"""
self.ys = apodize(self.ys, self.framerate, denom, duration)
def hamming(self):
"""Apply a Hamming window to the wave.
"""
self.ys *= numpy.hamming(len(self.ys))
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def unbias(self):
"""Unbiases the signal.
"""
self.ys = unbias(self.ys)
def segment(self, start=0, duration=None):
"""Extracts a segment.
start: float start time in seconds
duration: float duration in seconds
returns: Wave
"""
i = start * self.framerate
if duration is None:
j = None
else:
j = i + duration * self.framerate
ys = self.ys[i:j]
return Wave(ys, self.framerate)
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = numpy.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
def make_dct(self):
amps = scipy.fftpack.dct(self.ys, type=2)
return Dct(amps, self.framerate)
def make_spectrogram(self, seg_length, window_func=numpy.hamming):
"""Computes the spectrogram of the wave.
seg_length: number of samples in each segment
window_func: function used to compute the window
returns: Spectrogram
"""
n = len(self.ys)
window = window_func(seg_length)
start, end, step = 0, seg_length, seg_length / 2
spec_map = {}
while end < n:
ys = self.ys[start:end] * window
hs = numpy.fft.rfft(ys)
t = (start + end) / 2.0 / self.framerate
spec_map[t] = Spectrum(hs, self.framerate)
start += step
end += step
return Spectrogram(spec_map, seg_length, window_func)
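    # Editor's note (hedged usage sketch, not part of the original Think DSP
    # source): with the default 11025 Hz framerate used elsewhere in this file,
    #
    #   wave = read_wave('sound.wav')
    #   sg = wave.make_spectrogram(seg_length=512)
    #   # segments are 512/11025 ~ 46 ms long and hop by seg_length/2,
    #   # so sg.time_res ~ 0.046 s and sg.freq_res ~ (11025/2) / 256 ~ 21.5 Hz
    #   sg.plot(high=100)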
def plot(self, **options):
"""Plots the wave.
"""
n = len(self.ys)
ts = numpy.linspace(0, self.duration, n)
thinkplot.plot(ts, self.ys, **options)
def corr(self, other):
"""Correlation coefficient two waves.
other: Wave
returns: 2x2 covariance matrix
"""
mat = self.cov_mat(other)
corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1])
return corr
def cov_mat(self, other):
"""Covariance matrix of two waves.
other: Wave
returns: 2x2 covariance matrix
"""
return numpy.cov(self.ys, other.ys)
def cov(self, other):
"""Covariance of two unbiased waves.
other: Wave
returns: float
"""
total = sum(self.ys * other.ys) / len(self.ys)
return total
def cos_cov(self, k):
"""Covariance with a cosine signal.
freq: freq of the cosine signal in Hz
returns: float covariance
"""
n = len(self.ys)
factor = math.pi * k / n
ys = [math.cos(factor * (i+0.5)) for i in range(n)]
total = 2 * sum(self.ys * ys)
return total
def cos_transform(self):
"""Discrete cosine transform.
returns: list of frequency, cov pairs
"""
n = len(self.ys)
res = []
for k in range(n):
cov = self.cos_cov(k)
res.append((k, cov))
return res
def write(self, filename='sound.wav'):
"""Write a wave file.
filename: string
"""
print 'Writing', filename
wfile = WavFileWriter(filename, self.framerate)
wfile.write(self)
wfile.close()
def play(self, filename='sound.wav'):
"""Plays a wave file.
filename: string
"""
self.write(filename)
play_wave(filename)
def unbias(ys):
"""Shifts a wave array so it has mean 0.
ys: wave array
returns: wave array
"""
return ys - ys.mean()
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low)
def quantize(ys, bound, dtype):
"""Maps the waveform to quanta.
ys: wave array
bound: maximum amplitude
dtype: numpy data type of the result
returns: quantized signal
"""
if max(ys) > 1 or min(ys) < -1:
print 'Warning: normalizing before quantizing.'
ys = normalize(ys)
zs = (ys * bound).astype(dtype)
return zs
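# Editor's note (worked example, not part of the original file): quantize() maps a
# wave array in [-1, 1] onto signed integers.  WavFileWriter above uses
# bound = 2**15 - 1 = 32767 for 16-bit output, so a sample of 0.5 becomes
# int(0.5 * 32767) = 16383 and -1.0 becomes -32767.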
def apodize(ys, framerate, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
ys: wave array
framerate: int frames per second
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
returns: wave array
"""
# a fixed fraction of the segment
n = len(ys)
k1 = n / denom
# a fixed duration of time
k2 = int(duration * framerate)
k = min(k1, k2)
w1 = numpy.linspace(0, 1, k)
w2 = numpy.ones(n - 2*k)
w3 = numpy.linspace(1, 0, k)
window = numpy.concatenate((w1, w2, w3))
return ys * window
class Signal(object):
"""Represents a time-varying signal."""
def __add__(self, other):
"""Adds two signals.
other: Signal
returns: Signal
"""
if other == 0:
return self
return SumSignal(self, other)
__radd__ = __add__
@property
def period(self):
"""Period of the signal in seconds (property).
For non-periodic signals, use the default, 0.1 seconds
returns: float seconds
"""
return 0.1
def plot(self, framerate=11025):
"""Plots the signal.
framerate: samples per second
"""
duration = self.period * 3
wave = self.make_wave(duration, start=0, framerate=framerate)
wave.plot()
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
dt = 1.0 / framerate
ts = numpy.arange(start, duration, dt)
ys = self.evaluate(ts)
return Wave(ys, framerate=framerate, start=start)
def infer_framerate(ts):
"""Given ts, find the framerate.
Assumes that the ts are equally spaced.
ts: sequence of times in seconds
returns: frames per second
"""
dt = ts[1] - ts[0]
framerate = 1.0 / dt
return framerate
class SumSignal(Signal):
"""Represents the sum of signals."""
def __init__(self, *args):
"""Initializes the sum.
args: tuple of signals
"""
self.signals = args
@property
def period(self):
"""Period of the signal in seconds.
        Note: this is not correct; it's mostly a placeholder.
But it is correct for a harmonic sequence where all
component frequencies are multiples of the fundamental.
returns: float seconds
"""
return max(sig.period for sig in self.signals)
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return sum(sig.evaluate(ts) for sig in self.signals)
class Sinusoid(Signal):
"""Represents a sinusoidal signal."""
def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
"""Initializes a sinusoidal signal.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
func: function that maps phase to amplitude
"""
self.freq = freq
self.amp = amp
self.offset = offset
self.func = func
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return 1.0 / self.freq
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * self.func(phases)
return ys
def CosSignal(freq=440, amp=1.0, offset=0):
"""Makes a consine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
"""Makes a sine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.sin)
class SquareSignal(Sinusoid):
"""Represents a square signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = self.amp * numpy.sign(unbias(frac))
return ys
class SawtoothSignal(Sinusoid):
"""Represents a sawtooth signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = normalize(unbias(frac), self.amp)
return ys
class ParabolicSignal(Sinusoid):
"""Represents a parabolic signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**2
ys = normalize(unbias(ys), self.amp)
return ys
class GlottalSignal(Sinusoid):
"""Represents a periodic signal that resembles a glottal signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**4 * (1-frac)
ys = normalize(unbias(ys), self.amp)
return ys
class TriangleSignal(Sinusoid):
"""Represents a triangle signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = numpy.abs(frac - 0.5)
ys = normalize(unbias(ys), self.amp)
return ys
class Chirp(Signal):
"""Represents a signal with variable frequency."""
def __init__(self, start=440, end=880, amp=1.0):
"""Initializes a linear chirp.
start: float frequency in Hz
end: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
"""
self.start = start
self.end = end
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return ValueError('Non-periodic signal.')
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
freqs = numpy.linspace(self.start, self.end, len(ts)-1)
return self._evaluate(ts, freqs)
def _evaluate(self, ts, freqs):
"""Helper function that evaluates the signal.
ts: float array of times
freqs: float array of frequencies during each interval
"""
#n = len(freqs)
#print freqs[::n/2]
dts = numpy.diff(ts)
dps = PI2 * freqs * dts
phases = numpy.cumsum(dps)
phases = numpy.insert(phases, 0, 0)
ys = self.amp * numpy.cos(phases)
return ys
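    # Editor's note (added comment, not part of the original source): _evaluate
    # integrates the instantaneous frequency numerically,
    #   phase[i] = 2*pi * sum_{j <= i} freqs[j] * dts[j],
    # the discrete form of phi(t) = 2*pi * integral f(t) dt, and then returns
    # amp * cos(phase), which is what makes the chirp sweep smoothly.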
class ExpoChirp(Chirp):
"""Represents a signal with varying frequency."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
start, end = math.log10(self.start), math.log10(self.end)
freqs = numpy.logspace(start, end, len(ts)-1)
return self._evaluate(ts, freqs)
class SilentSignal(Signal):
"""Represents silence."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return numpy.zeros(len(ts))
class _Noise(Signal):
"""Represents a noise signal (abstract parent class)."""
def __init__(self, amp=1.0):
"""Initializes a white noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
"""Represents uncorrelated uniform noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.uniform(-self.amp, self.amp, len(ts))
return ys
class UncorrelatedGaussianNoise(_Noise):
"""Represents uncorrelated gaussian noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.normal(0, 1, len(ts))
ys = normalize(ys, self.amp)
return ys
class BrownianNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
Computes Brownian noise by taking the cumulative sum of
a uniform random series.
ts: float array of times
returns: float wave array
"""
#dys = numpy.random.normal(0, 1, len(ts))
dys = numpy.random.uniform(-1, 1, len(ts))
#ys = numpy.cumsum(dys)
ys = scipy.integrate.cumtrapz(dys, ts)
ys = normalize(unbias(ys), self.amp)
return ys
class PinkNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def __init__(self, amp=1.0, beta=1.0):
"""Initializes a pink noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
self.beta = beta
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
signal = UncorrelatedUniformNoise()
wave = signal.make_wave(duration, start, framerate)
spectrum = wave.make_spectrum()
spectrum.pink_filter(beta=self.beta)
wave2 = spectrum.make_wave()
wave2.unbias()
wave2.normalize(self.amp)
return wave2
def rest(duration):
"""Makes a rest of the given duration.
duration: float seconds
returns: Wave
"""
signal = SilentSignal()
wave = signal.make_wave(duration)
return wave
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
"""Make a MIDI note with the given duration.
midi_num: int MIDI note number
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freq = midi_to_freq(midi_num)
signal = sig_cons(freq)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def midi_to_freq(midi_num):
"""Converts MIDI note number to frequency.
midi_num: int MIDI note number
returns: float frequency in Hz
"""
x = (midi_num - 69) / 12.0
freq = 440.0 * 2**x
return freq
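# Editor's note (worked example, not part of the original file): MIDI note 69 is
# concert A, so midi_to_freq(69) == 440.0 Hz; middle C (MIDI note 60) gives
# 440 * 2 ** ((60 - 69) / 12.0) ~ 261.63 Hz.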
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def cos_wave(freq, duration=1, offset=0):
"""Makes a cosine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = CosSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def mag(a):
"""Computes the magnitude of a numpy array.
a: numpy array
returns: float
"""
return numpy.sqrt(numpy.dot(a, a))
def main():
cos_basis = cos_wave(440)
sin_basis = sin_wave(440)
wave = cos_wave(440, offset=math.pi/2)
cos_cov = cos_basis.cov(wave)
sin_cov = sin_basis.cov(wave)
print cos_cov, sin_cov, mag((cos_cov, sin_cov))
return
wfile = WavFileWriter()
for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
GlottalSignal, ParabolicSignal, SquareSignal]:
print sig_cons
sig = sig_cons(440)
wave = sig.make_wave(1)
wave.apodize()
wfile.write(wave)
wfile.close()
return
signal = GlottalSignal(440)
signal.plot()
pyplot.show()
return
wfile = WavFileWriter()
for m in range(60, 0, -1):
wfile.write(make_note(m, 0.25))
wfile.close()
return
wave1 = make_note(69, 1)
wave2 = make_chord([69, 72, 76], 1)
wave = wave1 | wave2
wfile = WavFileWriter()
wfile.write(wave)
wfile.close()
return
sig1 = CosSignal(freq=440)
sig2 = CosSignal(freq=523.25)
sig3 = CosSignal(freq=660)
sig4 = CosSignal(freq=880)
sig5 = CosSignal(freq=987)
sig = sig1 + sig2 + sig3 + sig4
#wave = Wave(sig, duration=0.02)
#wave.plot()
wave = sig.make_wave(duration=1)
#wave.normalize()
wfile = WavFileWriter(wave)
wfile.write()
wfile.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/heat_flux/plot_from_pp_3234_diff_8km.py | 2 | 5598 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
experiment_ids = ['dklyu']
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file = '3234_mean'
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
# Load diff cube
gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/dkmb/dkmbq/%s.pp' % pp_file
glob = iris.load_cube(gl)
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile)
lat = pcube.coord('grid_latitude').points
lon = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - Unrotate pole %s' % (experiment_id,cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
#pcube.remove_coord('grid_latitude')
#pcube.remove_coord('grid_longitude')
#pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
#pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
lon_min=np.min(lon)
lon_max=np.max(lon)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lat)
lat_max=np.max(lat)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
pcubediff=pcube-glob
plt.figure(figsize=(8,8))
        cmap = plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_min+degs_crop_left,lon_max-degs_crop_right,lat_min+degs_crop_bottom,lat_max-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,9)
cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
#cbar.set_label('')
cbar.set_label(pcube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%d' % i for i in ticks])
main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
model_info = re.sub(r'[(\']', ' ', model_info)
model_info = re.sub(r'[\',)]', ' ', model_info)
print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('%s%s/%s/%s_%s_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
rs2/pandas | pandas/core/arrays/sparse/dtype.py | 1 | 12178 | """Sparse Dtype"""
import re
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
import warnings
import numpy as np
from pandas._typing import Dtype, DtypeObj
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool_dtype,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
if TYPE_CHECKING:
from pandas.core.arrays.sparse.array import SparseArray # noqa: F401
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
Attributes
----------
None
Methods
-------
None
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None):
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype("object")
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError(f"fill_value must be a scalar. Got {fill_value} instead")
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super().__hash__()
def __eq__(self, other: Any) -> bool:
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value
and isinstance(self.fill_value, type(other.fill_value))
or isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
return isna(self.fill_value)
@property
def _is_numeric(self) -> bool:
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self) -> bool:
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> Type["SparseArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays.sparse.array import SparseArray # noqa: F811
return SparseArray
@classmethod
def construct_from_string(cls, string: str) -> "SparseDtype":
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
except ValueError as err:
raise TypeError(msg) from err
else:
result = SparseDtype(sub_type)
msg = (
f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead."
)
if has_fill_value and str(result) != string:
raise TypeError(msg)
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype: str) -> Tuple[str, bool]:
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()["subtype"]
has_fill_value = bool(m.groupdict()["fill_value"])
elif dtype == "Sparse":
subtype = "float64"
else:
raise ValueError(f"Cannot parse {dtype}")
return subtype, has_fill_value
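    # Editor's note (illustrative, not part of the pandas source): the regex above
    # gives, for example,
    #   _parse_subtype("Sparse[int, 0]")  -> ("int", True)
    #   _parse_subtype("Sparse[float64]") -> ("float64", False)
    #   _parse_subtype("Sparse")          -> ("float64", False)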
@classmethod
def is_dtype(cls, dtype: object) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str) and dtype.startswith("Sparse"):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == "Sparse"
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
if is_extension_array_dtype(dtype):
raise TypeError("sparse arrays of extension dtypes not supported")
fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
<class 'str'>
"""
if isinstance(self.fill_value, str):
return type(self.fill_value)
return self.subtype
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
# TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
if any(
isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
for x in dtypes
):
return None
fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all-NA case too.
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn(
"Concatenating sparse arrays with multiple fill "
f"values: '{fill_values}'. Picking the first and "
"converting the rest.",
PerformanceWarning,
stacklevel=6,
)
np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)
| bsd-3-clause |
ChanderG/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
sondree/Master-thesis | Python EA/ea/progress_drawing.py | 1 | 3860 | #!/usr/bin/python
from matplotlib.patches import Rectangle, Circle, RegularPolygon, Arrow
from fitness import FITNESS_SECTION
class BasicDrawing(object):
def __init__(self,config):
self.config = config
self.grid_x = self.config.getint(FITNESS_SECTION,"grid_x")
self.grid_y = self.config.getint(FITNESS_SECTION,"grid_y")
self.emitter_x = self.config.getint(FITNESS_SECTION,"emitter_pos_x")
self.emitter_y = self.config.getint(FITNESS_SECTION,"emitter_pos_y")
def make_emitter_patch(self):
s = (self.grid_x + self.grid_y)/2.0 * 0.008
c = RegularPolygon((self.emitter_x,self.emitter_y), 4, s, 3.14/2.0,facecolor='r',edgecolor='k',alpha=0.8)
return c
def make_receiver_patch(self,x,y,color='b'):
s = (self.grid_x + self.grid_y)/2.0 * 0.005
c = Circle((x,y),s,facecolor=color,edgecolor='k',alpha=0.6)
return c
def make_prediction_patch(self,x,y):
s = (self.grid_x + self.grid_y)/2.0 * 0.003
c = Circle((x,y),s,facecolor='k',edgecolor='k',alpha=0.2)
return c
def plot_pheno(self,view,pheno,**args):
c1 = self.make_emitter_patch()
view.add_artist(c1)
for position in pheno.get_position():
try:
x,y,z = position
except ValueError:
x,y = position
c1 = self.make_receiver_patch(x,y)
view.add_artist(c1)
class PathDrawing(BasicDrawing):
def make_receiver_path(self,from_x,from_y,to_x,to_y, alpha=0.2):
c = Arrow(from_x,from_y,to_x-from_x,to_y-from_y,width=1.0, alpha=alpha)
return c
def plot_pheno(self,view,pheno,draw_points=True, draw_lines=True):
c1 = self.make_emitter_patch()
view.add_artist(c1)
for index in xrange(pheno.get_receiver_count()):
p_x, p_y = pheno.get_receiver_origin(index)
c0 = self.make_receiver_patch(p_x,p_y, color = (0.0,1.0,0.5))
view.add_artist(c0)
for position in pheno.get_receiver_path(index):
x,y = position
if draw_points:
c1 = self.make_receiver_patch(x,y)
view.add_artist(c1)
if p_x is not None and p_y is not None and draw_lines:
c2 = self.make_receiver_path(p_x,p_y,x,y)
view.add_artist(c2)
p_x, p_y = x,y
class PathIncrDrawing(PathDrawing):
def plot_pheno(self,view,pheno,draw_points=True, draw_lines=True):
c1 = self.make_emitter_patch()
view.add_artist(c1)
for index in xrange(pheno.get_receiver_count()):
p_x, p_y = pheno.get_receiver_origin(index)
c0 = self.make_receiver_patch(p_x,p_y, color = (0.0,1.0,0.5))
view.add_artist(c0)
for position in pheno.get_receiver_fixed_path(index):
x,y = position
if draw_points:
c1 = self.make_receiver_patch(x,y,color=(1.0,0.3,0))
view.add_artist(c1)
if p_x is not None and p_y is not None and draw_lines:
c2 = self.make_receiver_path(p_x,p_y,x,y)
view.add_artist(c2)
p_x, p_y = x,y
for position in pheno.get_receiver_path(index):
x,y = position
if draw_points:
c1 = self.make_receiver_patch(x,y)
view.add_artist(c1)
if p_x is not None and p_y is not None and draw_lines:
c2 = self.make_receiver_path(p_x,p_y,x,y)
view.add_artist(c2)
p_x, p_y = x,y
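# Editor's note (hedged sketch, not part of the original module): the drawing
# classes expect a matplotlib Axes as `view` and a phenotype object from the
# surrounding EA code, roughly:
#
#   import matplotlib.pyplot as plt
#   drawer = PathDrawing(config)     # config: ConfigParser providing FITNESS_SECTION
#   fig, ax = plt.subplots()
#   drawer.plot_pheno(ax, pheno)     # pheno: individual exposing get_receiver_*()
#   fig.savefig('progress.png')
#
# `config` and `pheno` here are assumptions about the caller, not APIs defined
# in this file.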
| gpl-3.0 |
sandyjmacdonald/dots_for_microarrays | dots_backend/dots_analysis.py | 1 | 12261 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import warnings
import pandas as pd
import numpy as np
import scipy.cluster.hierarchy as hac
from dots_arrays import Experiment
from sklearn.decomposition import PCA
from itertools import combinations
from scipy.stats import ttest_ind, f_oneway
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.multicomp import MultiComparison
from sklearn.metrics import silhouette_score, silhouette_samples
from sklearn.cluster import KMeans
## Functions ##
def run_pca(experiment):
'''Run PCA when given an experiment instance or data frame with expression values.
Args:
experiment (Experiment instance): An instance of the Experiment class.
Returns:
A Pandas data frame with results of PCA analysis.
'''
## The below if/elif checks whether experiment passed to function is an instance of the
## Experiment class or just a data frame with expression values in.
if isinstance(experiment, Experiment):
df = experiment.get_exp_values().T
elif isinstance(experiment, pd.DataFrame):
df = experiment.T
## Run the PCA, get the scores and unzip tuples into separate lists of x and y values.
pca = PCA(n_components=3)
pca_fit = pca.fit_transform(df)
vals = [(x[0], x[1]) for x in pca_fit]
xvals, yvals = zip(*vals)
## Convert the data into a dictionary for easy conversion into a Pandas data frame.
pca_dict = {'xvals': xvals, 'yvals': yvals, 'sampleid': list(df.index), 'group': [x.split('_')[0] for x in list(df.index)]}
pca_df = pd.DataFrame(pca_dict)
return pca_df
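# Editor's note (hedged usage sketch, not part of the original module): run_pca
# accepts an Experiment instance or a genes-by-samples expression data frame, e.g.
#
#   pca_df = run_pca(experiment)
#   # pca_df has one row per sample with columns 'xvals', 'yvals', 'sampleid'
#   # and 'group', ready for the plotting layer.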
def get_fold_changes(experiment):
'''Calculate pairwise fold change and log fold change values.
Args:
experiment (Experiment instance): An instance of the Experiment class.
Returns:
A new Pandas data frame with pairwise fold change and log fold change values.
'''
groups = experiment.get_groups()
pairs = map(list, list(combinations(groups, 2)))
if all([g.isdigit() for g in groups]):
pairs = sorted(pairs, key=lambda x:x[0])
samples = experiment.get_sampleids()
df = experiment.df
new_df = df.ix[:, :5].copy()
for group in groups:
ids = [sample for sample in samples if group == sample.split('_')[0]]
new_df['mean_' + group] = df[ids].mean(axis=1)
del df
## For each pair, calculate mean values for each group, fold changes and log2 fold changes.
for pair in pairs:
if all([g.isdigit() for g in pair]):
pair.sort(key=int, reverse=True)
else:
pair.sort()
name_1, name_2 = pair
new_df['abs_mean_diff_' + name_1 + '_' + name_2] = abs((2 ** new_df['mean_' + name_1]) - (2 ** new_df['mean_' + name_2]))
new_df['logFC_' + name_1 + '_' + name_2] = new_df['mean_' + name_1] - new_df['mean_' + name_2]
new_df['FC_' + name_1 + '_' + name_2] = 2 ** new_df['logFC_' + name_1 + '_' + name_2]
return new_df
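# Editor's note (worked example, not part of the original module): expression
# values are assumed to be on the log2 scale, so a probe with mean_A = 5.0 and
# mean_B = 3.0 gets logFC_A_B = 2.0, FC_A_B = 2 ** 2.0 = 4.0 and
# abs_mean_diff_A_B = |2**5 - 2**3| = 24.0.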
def run_stats(experiment):
'''Run independent T-test or one-way ANOVA dependent on number of groups.
Args:
experiment (Experiment instance): An instance of the Experiment class.
Returns:
A new Pandas data frame with p values, adjusted p values and Tukey HSD
post-hoc results if there are > 2 groups.
'''
groups = experiment.get_groups()
samples = experiment.get_sampleids()
df = experiment.df
all_vals = []
## Get values for each group, ready for T-test or ANOVA.
for group in groups:
ids = [sample for sample in samples if group == sample.split('_')[0]]
vals = map(list, df[ids].values)
all_vals.append(vals)
## Decide whether to use T-test or ANOVA dependent on number of groups.
if len(groups) == 2:
p_vals = [ttest_ind(all_vals[0][i], all_vals[1][i])[1] for i in range(len(all_vals[0]))]
else:
p_vals = []
for i in range(len(all_vals[0])):
row_vals = [all_vals[j][i] for j in range(len(groups))]
p_val = f_oneway(*row_vals)[1]
p_vals.append(p_val)
## Adjust the p values and create a new data frame with them in.
p_val_adj = list(multipletests(p_vals, method='fdr_bh')[1])
new_df = df.ix[:, :5].copy()
new_df['p_val'] = pd.Series(p_vals, index=new_df.index)
new_df['p_val_adj'] = pd.Series(p_val_adj, index=new_df.index)
## Post-hoc test.
## Only do the post-hoc test if there are more than 2 groups, duh!
if len(groups) > 2:
vals_df = df[samples]
group_ids = [sample.split('_')[0] for sample in vals_df.columns.values]
posthoc_results = {}
## Run the post-hoc test on each row.
for row in range(len(vals_df)):
row_vals = vals_df.ix[row]
mc = MultiComparison(row_vals, group_ids)
mc_groups = mc.groupsunique
results = mc.tukeyhsd()
significant = results.reject
pairs = zip(*[x.tolist() for x in mc.pairindices])
## Go through each pair and add results to the posthoc_results dictionary.
for i in range(len(pairs)):
pair = list(pairs[i])
pair.sort()
pair_name = str(mc_groups[pair[0]]) + '_' + str(mc_groups[pair[1]])
if pair_name in posthoc_results:
posthoc_results[pair_name].append(significant[i])
else:
posthoc_results[pair_name] = [significant[i]]
## Add the post-hoc results to the data frame.
for pair_name in posthoc_results:
new_df['significant_' + pair_name] = posthoc_results[pair_name]
return new_df
def find_clusters(df, k_vals=[4, 9, 16, 25], how='hierarchical'):
'''Find clusters, and if method is k-means run silhouette analysis
to determine the value of k.
Args:
df (data frame): A data frame with normalised expression data.
k_vals (list or range): The range over which to test k.
how ('hierarchical' or 'kmeans'): Clustering method.
Returns:
A list of cluster numbers.
'''
## Don't run the silhouette analysis for hierarchical clustering,
## just calculate the clusters using estimate of k.
if how == 'hierarchical':
k = int(np.sqrt((len(df) / 2.0)))
hc = hac.linkage(df, method='average')
optimal_clusters = hac.fcluster(hc, t=k, criterion='maxclust')
## If method is k-means, run silhouette analysis.
elif how == 'kmeans':
best_combined_score = 0
optimal_k = 2
## Try values of k from range and keep track of optimal k according
## to silhouette score.
for k in k_vals:
km = KMeans(n_clusters=k, random_state=10)
clusters = km.fit_predict(df)
silhouette_avg = silhouette_score(df, clusters)
sample_silhouette_values = silhouette_samples(df, clusters)
above_mean = 0
silhouette_sizes = []
for i in range(k):
ith_cluster_silhouette_values = sample_silhouette_values[clusters == i]
size_cluster_i = ith_cluster_silhouette_values.shape[0]
silhouette_sizes.append(size_cluster_i)
if max(ith_cluster_silhouette_values) > silhouette_avg:
above_mean += 1
## This combined score should pick the best value of k
above_mean_score = float(above_mean) / k
std_score = 1.0/np.std(silhouette_sizes) if np.std(silhouette_sizes) > 1.0 else 1.0
combined_score = (silhouette_avg + above_mean_score + std_score) / 3
## Put the clusters in the new column in the data frame.
if combined_score > best_combined_score:
best_combined_score = combined_score
optimal_k = k
optimal_clusters = clusters
optimal_clusters = [cluster + 1 for cluster in optimal_clusters]
return optimal_clusters
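## Small illustration of the silhouette-based choice of k used by find_clusters,
## run on synthetic 2D data. The blob parameters and candidate k values are
## assumptions made purely for demonstration; it is not called by the pipeline.
def _example_silhouette_selection():
    from sklearn.cluster import KMeans
    from sklearn.metrics import silhouette_score
    blob_a = np.random.normal(0, 0.2, (20, 2))
    blob_b = np.random.normal(3, 0.2, (20, 2))
    data = np.vstack([blob_a, blob_b])
    scores = {}
    for k in (2, 3, 4):
        labels = KMeans(n_clusters=k, random_state=10).fit_predict(data)
        scores[k] = silhouette_score(data, labels)
    ## With two well-separated blobs, k=2 should score highest.
    return scores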
def get_clusters(experiment, how='hierarchical'):
'''Clusters significantly differentially expressed genes by expression pattern
across the samples using hierarchical or k-means clustering and silhouette analysis
to pick the value of k (via the find_clusters function).
Args:
experiment (Experiment instance): An instance of the Experiment class.
how ('hierarchical' or 'kmeans'): Clustering method.
Returns:
A new Pandas data frame with fold changes, p values and clusters.
'''
## Run the stats to filter genes down to significant ones only.
stats = run_stats(experiment)
stats = stats[['FeatureNum', 'p_val', 'p_val_adj']].copy()
## Get the fold changes
fcs = get_fold_changes(experiment)
keep_cols = [x for x in fcs.columns.values if 'logFC' in x or 'abs_mean_diff' in x]
fc_cols = [x for x in fcs.columns.values if 'logFC' in x]
fcs = fcs[['FeatureNum'] + keep_cols].copy()
norm_exp_cols = experiment.get_sampleids()
abs_mean_diff_cols = [x for x in fcs.columns.values if 'abs_mean_diff' in x]
## Merge together the stats and fold changes data frames.
merged_df = pd.merge(experiment.df, stats, on='FeatureNum')
merged_df = pd.merge(merged_df, fcs, on='FeatureNum')
    ## Filter the merged data frame to leave only significantly differentially
    ## expressed genes (adj. p < 0.05), then raise the fold change cutoff until
    ## the table is small enough (rows x samples <= 40,000) for heat maps to be
    ## drawn.
filtered_df = merged_df[(merged_df['p_val_adj'] < 0.05) & ((abs(merged_df[fc_cols]) > np.log2(float(1))).any(1) == True) & ((merged_df[abs_mean_diff_cols] > 0.5).any(1) == True)].copy()
i = 2
while len(filtered_df) * len(experiment.get_sampleids()) > 40000:
filtered_df = merged_df[(merged_df['p_val_adj'] < 0.05) & ((abs(merged_df[fc_cols]) > np.log2(float(i))).any(1) == True) & ((merged_df[abs_mean_diff_cols] > 0.5).any(1) == True)].copy()
i += 1
## Clean up.
del merged_df
del stats
del fcs
## A good guesstimate for k.
k_limit = int(np.sqrt((len(filtered_df) / 2)))
## Catches numpy warnings about means of empty slices.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
## Hierarchical clustering.
if how == 'hierarchical':
clusters = find_clusters(filtered_df[norm_exp_cols], how='hierarchical')
filtered_df['cluster'] = clusters
## K-means clustering with silhouette analysis to determine value of k.
elif how == 'kmeans':
clusters = find_clusters(filtered_df[norm_exp_cols], k_vals=range(3, k_limit), how='kmeans')
filtered_df['cluster'] = clusters
## Sort the data frame by cluster and mean expression across samples.
    filtered_df['mean_norm_expression'] = filtered_df[norm_exp_cols].mean(axis=1)
filtered_df.sort_values(by=['cluster', 'mean_norm_expression'], ascending=[True, False], inplace=True)
filtered_df = filtered_df.reset_index(drop=True)
return filtered_df
def write_fcs_stats(experiment, outfile='foldchanges_stats.txt'):
'''Creates a tab-separated table with a full list of fold changes,
p values, adjusted p values and post hoc results.
Args:
experiment (Experiment instance): An instance of the Experiment class.
outfile (string): The name of the table-separated table to be created.
'''
## Run the stats and fold changes and merge them into a single data frame.
stats = run_stats(experiment)
posthoc_cols = [colname for colname in stats.columns.values if 'significant' in colname]
stats = stats[['FeatureNum', 'p_val', 'p_val_adj'] + posthoc_cols]
fcs = get_fold_changes(experiment)
fc_cols = [colname for colname in fcs.columns.values if not 'abs_mean_diff_' in colname]
merged_df = pd.merge(fcs, stats, on='FeatureNum')
## Define the order of the columns in the data frame.
colnames = list(merged_df.columns.values)
global col_order
col_order = ['mean', 'FC', 'logFC', 'abs_mean_diff', 'p_val', 'adj_p_val', 'significant']
## Function to custom sort the columns.
def keyfunc(col):
for c in col_order:
if col.startswith(c):
                ## Slice off the matched prefix; str.lstrip would strip a
                ## character set and could eat the start of the remaining name.
                return (col_order.index(c), col[len(c):].lstrip('_'))
## Sort the columns.
sorted_colnames = colnames[:5] + sorted(colnames[5:], key=keyfunc)
merged_df = merged_df[sorted_colnames]
## Fix the type of the FeatureNum column and sort it.
merged_df['FeatureNum'] = merged_df['FeatureNum'].astype(int)
merged_df.sort_values(by='FeatureNum', ascending=True, inplace=True)
## Write the table.
merged_df.to_csv(outfile, sep='\t', index=False)
def write_normalised_expression(experiment, outfile='normalised_expression.txt'):
'''Creates a tab-separated table with all of the normalised expression values.
Args:
experiment (Experiment instance): An instance of the Experiment class.
outfile (string): The name of the table-separated table to be created.
'''
## Read in the experiment.
experiment_df = experiment.df
## Sort the values columns.
colnames = list(experiment_df.columns.values)
sorted_colnames = colnames[:5] + sorted(colnames[5:])
experiment_df = experiment_df[sorted_colnames]
## Write the table.
experiment_df.to_csv(outfile, sep='\t', index=False)
| mit |
winklerand/pandas | pandas/core/computation/expressions.py | 4 | 7066 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas.core.common import _values_from_object
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
'where': set(['int64', 'float64', 'bool'])
}
# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all='ignore'):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
            # check for dtype compatibility
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= set([o.dtype.name])
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
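# Hedged illustration (not part of pandas): two aligned float64 frames with
# more than _MIN_ELEMENTS values pass the size and dtype gates above, so
# evaluate() would route their addition through numexpr when it is installed.
# The frame shape below is an arbitrary choice for demonstration; this helper
# is never called by pandas itself.
def _example_can_use_numexpr():
    import operator
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.random.randn(200, 100))   # 20,000 float64 elements
    return _can_use_numexpr(operator.add, '+', df, df, 'evaluate')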
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
reversed=False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
# we were originally called by a reversed op
# method
if reversed:
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
local_dict={'a_value': a_value,
'b_value': b_value},
casting='safe', truediv=truediv,
**eval_kwargs)
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b)
return result
def _where_standard(cond, a, b):
return np.where(_values_from_object(cond), _values_from_object(a),
_values_from_object(b))
def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, 'where', a, b, 'where'):
try:
cond_value = getattr(cond, 'values', cond)
a_value = getattr(a, 'values', a)
b_value = getattr(b, 'values', b)
result = ne.evaluate('where(cond_value, a_value, b_value)',
local_dict={'cond_value': cond_value,
'a_value': a_value,
'b_value': b_value},
casting='safe')
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
raise TypeError(str(detail))
if result is None:
result = _where_standard(cond, a, b)
return result
# turn myself on
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
unsupported=None):
if unsupported is None:
unsupported = {'+': '|', '*': '&', '-': '^'}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn("evaluating in Python space because the {op!r} "
"operator is not supported by numexpr for "
"the bool dtype, use {alt_op!r} instead"
.format(op=op_str, alt_op=unsupported[op_str]))
return False
if op_str in not_allowed:
raise NotImplementedError("operator {op!r} not implemented for "
"bool dtypes".format(op=op_str))
return True
def evaluate(op, op_str, a, b, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operand
op_str: the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b)
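# Illustrative usage sketch (not pandas API documentation): evaluate() picks
# numexpr for large, numeric, same-dtype operands and otherwise falls back to
# the standard path; both give the same numerical result. The array sizes are
# arbitrary assumptions, and this helper is never called by pandas itself.
def _example_evaluate_usage():
    import operator
    import numpy as np
    a = np.random.randn(100000)
    b = np.random.randn(100000)
    via_dispatch = evaluate(operator.add, '+', a, b)
    via_standard = _evaluate_standard(operator.add, '+', a, b)
    return np.allclose(via_dispatch, via_standard)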
def where(cond, a, b, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
| bsd-3-clause |
anguoyang/SMQTK | python/smqtk/indexing/naive_bayes.py | 1 | 9147 | """
LICENCE
-------
Copyright 2015 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
from . import Indexer
import cPickle
import os.path as osp
import numpy
from sklearn.naive_bayes import MultinomialNB
import smqtk_config
from smqtk.utils import safe_create_dir, SimpleTimer
class NaiveBayesMultinomial (Indexer):
def __init__(self, data_dir):
self.data_dir = osp.join(smqtk_config.DATA_DIR, data_dir)
# Array of UIDs in the index the UID refers to in these internal
# structures
#: :type: list[object]
self._uid_array = None
self._uid2idx_map = None
# Matrix of features
#: :type: numpy.core.multiarray.ndarray
self._feature_mat = None
if self.has_model_files():
self._load_model_files()
@property
def uid_list_filepath(self):
return osp.join(self.data_dir, "uid_list.pickle")
@property
def feature_mat_filepath(self):
return osp.join(self.data_dir, "feature_mat.npy")
def has_model_files(self):
return (osp.isfile(self.uid_list_filepath)
and osp.isfile(self.feature_mat_filepath))
def _load_model_files(self):
with open(self.uid_list_filepath, 'rb') as infile:
#: :type: list[object]
self._uid_array = cPickle.load(infile)
#: :type: numpy.core.multiarray.ndarray
self._feature_mat = numpy.load(self.feature_mat_filepath)
# Mapping of element UID to array/matrix index position
#: :type: dict of int
self._uid2idx_map = {}
for idx, uid in enumerate(self._uid_array):
self._uid2idx_map[uid] = idx
def has_model(self):
"""
:return: True if this indexer has a valid initialized model for
extension and ranking (or doesn't need one to perform those tasks).
:rtype: bool
"""
return (
self._uid_array is not None
and self._feature_mat is not None
and 0 not in self._feature_mat.shape # has dimensionality
)
def generate_model(self, descriptor_map, parallel=None, **kwargs):
"""
Generate this indexers data-model using the given features,
saving it to files in the configured data directory.
:raises RuntimeError: Precaution error when there is an existing data
model for this indexer. Manually delete or move the existing
model before computing another one.
Specific implementations may error on other things. See the specific
implementations for more details.
:raises ValueError: The given feature map had no content.
:param descriptor_map: Mapping of integer IDs to feature data. All feature
data must be of the same size!
:type descriptor_map: dict of (int, numpy.core.multiarray.ndarray)
:param parallel: Optionally specification of how many processors to use
when pooling sub-tasks. If None, we attempt to use all available
cores.
:type parallel: int
"""
super(NaiveBayesMultinomial, self).generate_model(descriptor_map, parallel)
num_features = len(descriptor_map)
ordered_uids = sorted(descriptor_map.keys())
sample_feature = descriptor_map[ordered_uids[0]]
feature_len = len(sample_feature)
# Pre-allocating arrays
self._uid_array = []
self._feature_mat = numpy.zeros(
(num_features, feature_len), dtype=sample_feature.dtype
)
self.log.info("Populating feature matrix")
for i, (uid, feat) in enumerate(descriptor_map.iteritems()):
self._uid_array.append(uid)
self._feature_mat[i] = feat
with SimpleTimer("Saving data files", self.log.info):
safe_create_dir(self.data_dir)
with open(self.uid_list_filepath, 'wb') as ofile:
cPickle.dump(self._uid_array, ofile)
numpy.save(self.feature_mat_filepath, self._feature_mat)
def extend_model(self, uid_feature_map, parallel=None):
"""
Extend, in memory, the current model with the given feature elements.
Online extensions are not saved to data files.
NOTE: For now, if there is currently no data model created for this
indexer / descriptor combination, we will error. In the future, I
would imagine a new model would be created.
:raises RuntimeError: No current model.
:param uid_feature_map: Mapping of integer IDs to features to extend this
indexer's model with.
:type uid_feature_map: dict of (collections.Hashable, numpy.core.multiarray.ndarray)
:param parallel: Optionally specification of how many processors to use
when pooling sub-tasks. If None, we attempt to use all available
cores. Not all implementation support parallel model extension.
:type parallel: int
"""
super(NaiveBayesMultinomial, self).extend_model(uid_feature_map, parallel)
# Shortcut when we're not given anything to actually process
if not uid_feature_map:
self.log.debug("No new features to extend")
return
# Check UID intersection
with SimpleTimer("Checking UID uniqueness", self.log.debug):
cur_uids = set(self._uid_array)
intersection = cur_uids.intersection(uid_feature_map.keys())
if intersection:
raise ValueError("The following IDs are already present in the "
"indexer's model: %s" % tuple(intersection))
# Check feature consistency
        # - Assuming that there is at least one feature in our current model...
with SimpleTimer("Checking input feature shape", self.log.debug):
example_feat = self._feature_mat[0]
for feat in uid_feature_map.values():
if feat.shape[0] != example_feat.shape[0]:
raise ValueError("One or more features provided are not of "
"the correct shape! Found %s when we "
"require %s"
                                     % (feat.shape, example_feat.shape))
del example_feat # Deleting so we can resize later in the function
# Extend data structures
# - UID and Feature matrix can be simply resized in-place as we are
# strictly adding to the end of the structure in memory.
# - distance matrix, since we're adding new columns in addition to rows,
# need to create a new matrix of the desired shape, copying in
# existing into new matrix.
self.log.debug("Sorting feature UIDs")
new_uids = sorted(uid_feature_map.keys())
self.log.debug("Calculating before and after sizes.")
num_features_before = self._feature_mat.shape[0]
num_features_after = num_features_before + len(uid_feature_map)
with SimpleTimer("Resizing uid/feature matrices", self.log.debug):
self._feature_mat.resize((num_features_after,
self._feature_mat.shape[1]))
with SimpleTimer("Adding to matrices", self.log.debug):
for i in range(num_features_before, num_features_after):
i_uid = new_uids[i-num_features_before]
self._uid_array.append(i_uid)
assert len(self._uid_array) == i+1
self._uid2idx_map[i_uid] = i
self._feature_mat[i] = uid_feature_map[i_uid]
def rank(self, pos_ids, neg_ids=()):
super(NaiveBayesMultinomial, self).rank(pos_ids, neg_ids)
num_pos = len(pos_ids)
num_neg = len(neg_ids)
train = numpy.ndarray((num_pos + num_neg, self._feature_mat.shape[1]),
dtype=self._feature_mat.dtype)
train[:num_pos, :] = \
self._feature_mat[tuple(self._uid2idx_map[uid] for uid in pos_ids), :]
train[num_pos:num_pos+num_neg, :] = \
self._feature_mat[tuple(self._uid2idx_map[uid] for uid in neg_ids), :]
# Positive elements are label 1, negatives are label 0
labels = numpy.concatenate((numpy.ones(len(pos_ids)),
numpy.zeros(len(neg_ids))))
# Only really care about probability of positive, so just keeping that
# column.
mnb = MultinomialNB()
probs = mnb.fit(train, labels).predict_proba(self._feature_mat)[:, 1]
return dict(zip(self._uid_array, probs))
def reset(self):
"""
Reset this indexer to its original state, i.e. removing any model
extension that may have occurred.
:raises RuntimeError: Unable to reset due to lack of available model.
"""
super(NaiveBayesMultinomial, self).reset()
self._load_model_files()
INDEXER_CLASS = [
NaiveBayesMultinomial
]
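# Standalone, hedged sketch of the ranking pattern used by
# NaiveBayesMultinomial.rank(): fit a MultinomialNB on positive/negative
# exemplars, then score every indexed element by its probability of being
# positive. The toy feature matrix is made up (MultinomialNB expects
# non-negative feature values); this helper is not used by SMQTK.
def _example_rank_with_multinomial_nb():
    import numpy
    from sklearn.naive_bayes import MultinomialNB
    features = numpy.random.rand(10, 4)      # 10 elements, 4 non-negative features
    train = features[:4]
    labels = numpy.array([1, 1, 0, 0])       # two positives, two negatives
    probs = MultinomialNB().fit(train, labels).predict_proba(features)[:, 1]
    return dict(zip(range(10), probs))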
| bsd-3-clause |
timsnyder/bokeh | examples/reference/models/multi_select_server.py | 1 | 1279 | ## Bokeh server for MultiSelect
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import MultiSelect
from bokeh.plotting import figure
x=[3,4,6,12,10,1]
y=[7,1,3,4,1,6]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange']
df=pd.DataFrame({'x':x,'y':y,'label':label}) #create a dataframe for future use
source = ColumnDataSource(data=dict(x=x, y=y,label=label))
plot_figure = figure(title='Multi-Select',plot_height=450, plot_width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y',color='label', source=source, size=10)
multi_select = MultiSelect(title="Filter Plot by color:", value=["Red", "Orange"],
options=[("Red", "Red"), ("Orange", "Orange")])
def multiselect_click(attr,old,new):
active_mselect=multi_select.value ##Getting multi-select value
selected_df=df[df['label'].isin(active_mselect)] #filter the dataframe with value in multi-select
source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)
multi_select.on_change('value',multiselect_click)
layout=row(multi_select, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Multi-Select Bokeh Server"
| bsd-3-clause |
burakbayramli/quant_at | data/simple.py | 1 | 7677 | from pymongo import MongoClient
import logging, Quandl, random, os
import datetime, glob, pandas as pd
from pandas_datareader import data, wb
import numpy as np, sys
from memo import *
MONGO_STAT = "C:\\Progra~1\\MongoDB\\Server\\3.2\\bin\\mongostat.exe /rowcount:1"
@memo # so that we dont constantly read the .quand file
def get_quandl_auth():
fname = '%s/.quandl' % os.environ['HOME']
if not os.path.isfile(fname):
print 'Please create a %s file ' % fname
exit()
auth = open(fname).read()
return auth
def web_load(symbol, backend, start, end):
"""
Outside interface to get all the data
"""
auth = get_quandl_auth()
try:
if backend == "fred":
return data.DataReader(symbol, backend, start, end)
if backend == "google":
return data.DataReader(market + ":" + symbol, backend, start, end)
if backend == "yahoo":
return data.DataReader(symbol, backend, start, end)
except IOError:
logging.debug("cant find " + symbol)
def get_beginning_of_time():
return datetime.datetime(2006, 1, 1)
def get_today():
#today=datetime.datetime(2016, 2, 15) # hack, freeze the end time
dt=datetime.datetime.today() - datetime.timedelta(days=1)
today = datetime.datetime(dt.year, dt.month, dt.day)
today_int = int(today.strftime('%Y%m%d') )
return today, today_int
def get_last_date_in_db(symbol, db, today):
ts = db.simple.find( {"_id.sym": symbol} )
# Check if there are records.
if ts.count() > 0:
q = {"$query" :{"_id.sym": symbol},"$orderby":{"_id.dt" : -1}}
ts = list(db.simple.find(q).limit(1))
last_date_in_db = int(ts[0]['_id']['dt'])
return pd.to_datetime(str(last_date_in_db), format='%Y%m%d')
def do_download(items):
"""
Download a given list of (market,symbol,name) triplets.
This list would have been prepared outside of this call, probably
a chunk of bigger list of symbols. This way this function has no
knowledge of what all symbols are, it only works on the piece given
to it.
"""
connection = MongoClient()
db = connection.findb
tickers = db.simple
beginning_of_time=get_beginning_of_time()
today, today_int = get_today()
logging.debug ("%d items" % len(items))
for market,symbol,name in items:
logging.debug("%s %s" % (symbol, name))
s = None; last_date_in_db = None
if market == "fred":
last_date = get_last_date_in_db(symbol, db, today)
logging.debug('last %s' % last_date)
logging.debug('today %s' % today)
if last_date and last_date >= today:
logging.debug('no need')
continue
s = web_load(symbol, "fred", beginning_of_time, today)
if 'DataFrame' not in str(type(s)): continue
for srow in s.iterrows():
dt = str(srow[0])[0:10]
dt = int(dt.replace("-",""))
new_row = {"_id": {"sym": symbol, "dt": dt },"a": float(srow[1])}
tickers.save(new_row)
elif market == "yahoo" :
start = beginning_of_time; end = today
last_date = get_last_date_in_db(symbol,db,today)
logging.debug('last %s' % last_date)
logging.debug('today %s' % today)
if last_date and last_date >= today:
logging.debug('no need')
continue
if last_date: start = last_date
logging.debug("" + repr(start) + " " + repr(end))
s = web_load(symbol, market, start, end)
# symbol could not be found
if 'DataFrame' not in str(type(s)): continue
for srow in s.iterrows():
dt = int((str(srow[0])[0:10]).replace("-",""))
new_row = {"_id": {"sym": symbol, "dt": dt },
"o": srow[1].Open,
"h": srow[1].High,
"l": srow[1].Low,
"c": srow[1].Close,
"v": srow[1].Volume,
"a": srow[1]['Adj Close']}
tickers.save(new_row)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
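# Tiny illustration of chunks(): splitting seven items into size-3 pieces
# yields [[0, 1, 2], [3, 4, 5], [6]]. Purely for demonstration; not called
# anywhere in this module.
def _example_chunks():
    return list(chunks(list(range(7)), 3))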
def download_data(ith_chunk=0, no_chunks=1,base_dir="."):
"""
Download data for the ith chunk of no_chunks. The chunks
come from a list of all available stock, etf symbols
"""
res = []
df = pd.read_csv("simple.csv")
for line in df.iterrows():
res.append((line[1].Engine, line[1].Symbol, line[1].Name))
random.seed(0)
random.shuffle(res)
s = int(len(res) / no_chunks)
res = list(chunks(res, s))
do_download(res[ith_chunk])
def get(symbol):
"""
Returns all data for symbol in a pandas dataframe
"""
connection = MongoClient()
db = connection.findb
q = {"$query" :{"_id.sym": symbol},"$orderby":{"_id.dt" : 1}}
res = list(db.simple.find( q )); res1 = []
if len(res) == 0: return pd.DataFrame()
    if 'c' in res[0]: # a stock ticker row with OHLC fields; macro series only carry 'a'
for x in res: res1.append( { 'a': x['a'],'c': x['c'],'h':x['h'], 'l': x['l'],'o': x['o'],'Date':x['_id']['dt']} )
else: # we have a macro timeseries, 'a' always exists in all time series
for x in res: res1.append( { 'a': x['a'],'Date':x['_id']['dt']} )
df = pd.DataFrame(res1, columns=res1[0].keys())
df['Date'] = pd.to_datetime(df.Date,format='%Y%m%d')
df = df.set_index('Date')
return df
def get_multi(symbols):
"""
Returns all data for symbols
"""
dfs = [get(x).a for x in symbols]
res = pd.concat(dfs,axis=1)
res.columns = symbols
return res
def get_hft(symbol, date):
"""
Return minute level high-frequency data for the given symbol and date
"""
connection = MongoClient()
db = connection.findb
q = {"$query" :{"_id.sym": symbol, "_id.dt": date} }
res = list(db.simple.find(q).limit(1))
if len(res) > 0 and 'hft' in res[0].keys():
df = pd.DataFrame(res[0]['hft'])
return df.T
def get_hft_for_dates(symbol, start, end):
"""
Return minute level high-frequency data for a time period
between start and end.
"""
start = pd.to_datetime(str(start), format='%Y%m%d')
end = pd.to_datetime(str(end), format='%Y%m%d')
dates = [(start+datetime.timedelta(days=i)).strftime('%Y%m%d') for i in range((end-start).days+1)]
res = []
for dt in dates:
df = get_hft(symbol, int(dt))
if 'DataFrame' in str(type(df)):
df['Date'] = dt
df = df.set_index('Date',append=True)
res.append(df)
dfs = pd.concat(res)
return dfs
def check_mongo():
pipe = os.popen(MONGO_STAT + ' 2>&1', 'r')
text = pipe.read()
if 'no reachable servers' in text:
print "\n\n**** Mongo is not running ****\n\n"
exit()
if __name__ == "__main__":
check_mongo()
f = '%(asctime)-15s: %(message)s'
if len(sys.argv) == 3:
logging.basicConfig(filename='%s/simple-%d.log' % (os.environ['TEMP'],int(sys.argv[1])),level=logging.DEBUG,format=f)
download_data(int(sys.argv[1]),int(sys.argv[2]))
else:
logging.basicConfig(filename='%s/simple.log' % os.environ['TEMP'],level=logging.DEBUG, format=f)
download_data()
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/make_room_for_ylabel_using_axesgrid.py | 15 | 1723 | from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable
if __name__ == "__main__":
import matplotlib.pyplot as plt
def ex1():
plt.figure(1)
ax = plt.axes([0,0,1,1])
# ax = plt.subplot(111)
ax.set_yticks([0.5])
ax.set_yticklabels(["very long label"])
make_axes_area_auto_adjustable(ax)
def ex2():
plt.figure(2)
ax1 = plt.axes([0,0,1,0.5])
ax2 = plt.axes([0,0.5,1,0.5])
ax1.set_yticks([0.5])
ax1.set_yticklabels(["very long label"])
ax1.set_ylabel("Y label")
ax2.set_title("Title")
make_axes_area_auto_adjustable(ax1, pad=0.1, use_axes=[ax1, ax2])
make_axes_area_auto_adjustable(ax2, pad=0.1, use_axes=[ax1, ax2])
def ex3():
fig = plt.figure(3)
ax1 = plt.axes([0,0,1,1])
divider = make_axes_locatable(ax1)
ax2 = divider.new_horizontal("100%", pad=0.3, sharey=ax1)
ax2.tick_params(labelleft="off")
fig.add_axes(ax2)
divider.add_auto_adjustable_area(use_axes=[ax1], pad=0.1,
adjust_dirs=["left"])
divider.add_auto_adjustable_area(use_axes=[ax2], pad=0.1,
adjust_dirs=["right"])
divider.add_auto_adjustable_area(use_axes=[ax1, ax2], pad=0.1,
adjust_dirs=["top", "bottom"])
ax1.set_yticks([0.5])
ax1.set_yticklabels(["very long label"])
ax2.set_title("Title")
ax2.set_xlabel("X - Label")
ex1()
ex2()
ex3()
plt.show()
| mit |
arborh/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 3 | 30720 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
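# Hedged illustration (not one of the test cases below): the adapters under
# test only need __len__, __getitem__, shape and dtype from an array-like, so
# slicing a DummyArrayLike hands back plain numpy data without converting the
# whole object to a tensor. The length used here is arbitrary.
def _example_dummy_array_like_protocol():
  wrapped = DummyArrayLike(np.arange(10))
  return len(wrapped), wrapped[2:5], wrapped.shape, wrapped.dtype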
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_iterator_expect_batch_size_numpy(self):
with self.assertRaisesRegexp(
ValueError, r'`batch_size` or `steps` is required'):
self.adapter_cls(self.numpy_input, self.numpy_target)
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
input_c = keras.Input(shape=(1,), name='input_b')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
      # i.e. each batch still holds one contiguous block of the original data.
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GenericArrayLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(
self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
# Because adapters are mutually exclusive, don't handle cases
# where all the data is numpy or an eagertensor
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input,
self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input,
self.arraylike_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.numpy_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.tensor_target))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_iterator_expect_batch_size_generic_arraylike(self):
with self.assertRaisesRegexp(
ValueError, r'`batch_size` or `steps` is required'):
self.adapter_cls(self.arraylike_input,
self.arraylike_target)
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it into
# memory), only the sliced data should be converted.
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle=True, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle='batch', batch_size=5)
self.model.evaluate(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.predict(self.arraylike_input, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.numpy_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.numpy_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.tensor_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.tensor_target, batch_size=5)
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch is shuffled contiguous data
for batch in epoch_batch_data:
_verify_batch(batch)
      # i.e. each batch still holds one contiguous block of the original data.
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
      # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target,
batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super(DatasetAdapterTest, self).setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GeneratorDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.generator_input, steps_per_epoch=10)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input)
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super(KerasSequenceAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
# Fit twice to ensure there isn't any duplication that prevent the worker
# from starting.
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
JonWel/CoolProp | Web/scripts/fluid_properties.Consistency.py | 3 | 1267 | from __future__ import print_function
import os.path
import CoolProp
import subprocess
import sys
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
root_dir = os.path.abspath(os.path.join(web_dir, '..'))
fluids_path = os.path.join(web_dir,'fluid_properties','fluids')
plots_path = os.path.join(web_dir,'fluid_properties','fluids','Consistencyplots')
template = """from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') #Force mpl to use a non-GUI backend
import matplotlib.pyplot as plt
from CoolProp.Plots.ConsistencyPlots import ConsistencyFigure
ff = ConsistencyFigure('{fluid:s}')
ff.savefig('{fluid:s}.png', dpi = 30)
ff.savefig('{fluid:s}.pdf')
plt.close()
del ff
"""
if not os.path.exists(plots_path):
os.makedirs(plots_path)
for fluid in CoolProp.__fluids__:
print('fluid:', fluid)
file_string = template.format(fluid = fluid)
file_path = os.path.join(plots_path, fluid + '.py')
print('Writing to', file_path)
with open(file_path, 'w') as fp:
fp.write(file_string)
print('calling:', 'python "' + fluid + '.py"', 'in',plots_path)
subprocess.check_call('python "' + fluid + '.py"', cwd = plots_path, stdout = sys.stdout, stderr = sys.stderr, shell = True) | mit |
YinongLong/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 21 | 10259 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_predict():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
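# A hedged illustrative helper (not called by the tests above) showing how soft voting combines
# per-estimator class probabilities, mirroring the weighted averages (t00, t11, ...) computed in
# test_predict_proba_on_toy_problem. The toy numbers in the comment are assumptions, not fixtures.
def _soft_vote_sketch(probas, weights):
    """Weighted average of per-estimator predict_proba outputs."""
    return np.average(np.asarray(probas, dtype=float), axis=0, weights=weights)

# _soft_vote_sketch([[[0.6, 0.4]], [[0.8, 0.2]], [[1.0, 0.0]]], weights=[2, 1, 1])
# -> array([[0.75, 0.25]]), i.e. (2 * 0.6 + 0.8 + 1.0) / 4 for the first class.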
| bsd-3-clause |
bbfamily/abu | abupy/UmpBu/ABuUmpEdgeBase.py | 1 | 25925 | # -*- encoding:utf-8 -*-
"""
Edge-judge (edge ump) base implementation module
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import logging
import os
from abc import abstractmethod
import numpy as np
import sklearn.preprocessing as preprocessing
from enum import Enum
from sklearn.metrics.pairwise import pairwise_distances
from ..CoreBu import ABuEnv
from ..UtilBu import ABuFileUtil
from ..SimilarBu.ABuCorrcoef import ECoreCorrType, corr_xy
from .ABuUmpBase import AbuUmpBase
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter
__author__ = '阿布'
__weixin__ = 'abu_quant'
"""在predict中度量输入的x和矩阵中其它矢量的pairwise_distances后,通过if distances_cx.min() > K_DISTANCE_THRESHOLD过滤"""
K_DISTANCE_THRESHOLD = 0.668
"""从第一轮pairwise_distances的结果使用argsort后取K_N_TOP_SEED个做为第二轮相似匹配的种子"""
K_N_TOP_SEED = 100
"""完成第二轮相似度匹配后使用K_SIMILAR_THRESHOLD做为阀值过滤后得到有投票权的向量"""
K_SIMILAR_THRESHOLD = 0.91
"""
K_CG_TOP_RATE is used to compute win_top and loss_top
win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> win_top = 100 - 100 * 0.236
-> win_top = 100 - 23.6
-> win_top = 76.4
loss_top = len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
eg:
len(self.fiter.df['profit_cg']) == 100
-> loss_top = 100 * 0.236
-> loss_top = 23.6
"""
K_CG_TOP_RATE = 0.236
"""在predict中最后的投票结果需要大于一定比例才被认可, 即对有争议的投票需要一方拥有相对优势才认可"""
K_EDGE_JUDGE_RATE = 0.618
class EEdgeType(Enum):
"""对交易的利润亏损进行rank后的分类结果"""
"""损失最多的一类交易,可理解为最底端"""
E_EEdge_TOP_LOSS = -1
"""其它的普通收益亏损的交易,在整个训练集交易中占最多数"""
E_EEdge_NORMAL = 0
"""盈利最多的一类交易,可理解为最顶端"""
E_STORE_TOP_WIN = 1
"""在第二轮的相似度匹配中使用的方法,传递给ABuCorrcoef.corr_xy函数"""
g_similar_type = ECoreCorrType.E_CORE_TYPE_PEARS
class AbuUmpEdgeBase(AbuUmpBase):
"""边裁基类"""
@classmethod
def ump_edge_clf_dump(cls, orders_pd_train, show_info=False, market_name=None):
"""
        Class method: build an AbuUmpEdgeBase subclass instance from the training trade set orders_pd_train,
        collect features from the training set with fit(), then call dump_clf() to serialize it locally.
        :param orders_pd_train: training trade set, a pd.DataFrame
        :param show_info: whether to print edge.fiter.df.head(), default False
        :param market_name: unique storage name used when training or loading the judge; default None,
                            i.e. derive the storage name from the current market set in env
        :return: the AbuUmpEdgeBase subclass instance
"""
edge = cls(orders_pd_train, market_name=market_name)
edge.fit()
edge.dump_clf()
if show_info:
print('edge.fiter.df.head():\n', edge.fiter.df.head())
return edge
@abstractmethod
def get_fiter_class(self):
"""abstractmethod子类必须实现,声明具体子类裁判使用的筛选特征形成特征的类"""
pass
@abstractmethod
def get_predict_col(self):
"""abstractmethod子类必须实现,获取具体子类裁判需要的特征keys"""
pass
@classmethod
@abstractmethod
def class_unique_id(cls):
"""
        Unique keyword name of the concrete ump class; class method, abstract, must be implemented by subclasses.
        Mainly intended for users registering custom umps: the user must guarantee that class_unique_id is unique,
        no internal check is performed. See extend_ump_block in ABuUmpManager for concrete usage.
"""
pass
def __init__(self, orders_pd=None, predict=False, market_name=None, **kwarg):
"""
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest, preferably an orders_pd that
                          has already been measured with AbuMetricsBase.fit_metrics
        :param predict: whether the judge being built is for prediction rather than training
        :param market_name: unique storage name used when training or loading the judge; default None,
                            i.e. derive the storage name from the current market set in env
        :param kwarg: keyword arguments passed through to the fiter_cls constructor:
self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
"""
        # feature-filtering class fiter_cls
self.fiter_cls = self.get_fiter_class()
        # scaler object used to standardize the trade features
self.scaler = preprocessing.StandardScaler()
if isinstance(market_name, ABuEnv.EMarketTargetType):
market_name = market_name.value
        # both predict and training need the judge's unique name; default to the market's string name, e.g. 'us', 'cn'
self.market_name = ABuEnv.g_market_target.value if market_name is None else market_name
if not predict:
            # TODO: separate the predict and training data logic instead of entangling them
if orders_pd is not None and 'profit_cg' not in orders_pd.columns:
                # metrics such as profit_cg only exist after AbuMetricsBase has been run
                logging.info('you had better run AbuMetricsBase.fit_metrics on orders_pd first!!!!')
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
                # only fit_metrics_order is run here, not fit_metrics, because e.g. futures and bitcoin have
                # their own metrics classes; the generic fit_metrics_order is enough
AbuMetricsBase(orders_pd, None, None, None).fit_metrics_order()
            # instantiate the feature-building object self.fiter
self.fiter = self.fiter_cls(orders_pd=orders_pd, **kwarg)
"""
            After self.fiter has been built via self.fiter_cls, the features already exist in self.fiter.df
eg:self.fiter.df
profit profit_cg buy_deg_ang42 buy_deg_ang252 buy_deg_ang60 buy_deg_ang21
2014-09-24 -22618.04 -0.0566 3.378 3.458 3.458 1.818
2014-10-24 -29690.28 -0.0742 0.191 2.889 2.809 -1.089
2014-10-29 18959.19 0.0542 -2.026 16.689 -0.761 1.980
2014-10-29 148209.36 0.5022 -3.427 -11.956 -8.296 6.507
2014-10-29 24867.60 0.0952 -2.915 39.469 -6.043 7.046
"""
            # svm is used by default; this should become a configurable parameter
self.fiter().estimator.svc()
def fit(self):
"""
边裁训练集拟合存储函数,相对主裁的训练fit函数,边裁的fit很简单
self.fiter.df经过fit后添加了新列p_rk_cg和rk形式如下所示
eg:self.fiter.df
profit profit_cg buy_deg_ang42 buy_deg_ang252 \
2014-09-24 -22618.04 -0.0566 3.378 3.458
2014-10-24 -29690.28 -0.0742 0.191 2.889
2014-10-29 18959.19 0.0542 -2.026 16.689
2014-10-29 148209.36 0.5022 -3.427 -11.956
2014-10-29 24867.60 0.0952 -2.915 39.469
2014-10-29 18959.19 0.0542 -2.026 16.689
2014-11-03 1250.80 0.0045 0.103 39.202
2014-11-11 59888.21 0.1857 8.341 -9.450
2014-11-12 -3578.78 -0.0140 3.963 6.595
2014-11-26 -29085.19 -0.0946 14.052 6.061
... ... ... ... ...
2016-03-14 16220.57 0.0559 4.002 -10.559
2016-03-14 -25328.12 -0.1218 0.129 -6.649
2016-03-30 -29858.44 -0.0863 13.121 -8.461
2016-04-04 5373.76 0.0244 4.409 -33.097
2016-04-13 -28044.40 -0.1159 6.603 -31.459
2016-04-14 -18645.93 -0.0467 4.611 18.428
2016-04-15 -32484.79 -0.1149 4.238 -13.247
2016-04-15 -32484.79 -0.1149 4.238 -13.247
2016-04-29 290.96 0.0007 1.445 16.266
2016-04-29 290.96 0.0007 1.445 16.266
buy_deg_ang60 buy_deg_ang21 p_rk_cg rk
2014-09-24 3.458 1.818 19.0 0
2014-10-24 2.809 -1.089 13.0 -1
2014-10-29 -0.761 1.980 35.5 0
2014-10-29 -8.296 6.507 56.0 1
2014-10-29 -6.043 7.046 43.0 1
2014-10-29 -0.761 1.980 35.5 0
2014-11-03 -4.614 10.125 28.0 0
2014-11-11 0.730 12.397 48.0 1
2014-11-12 -7.524 6.671 23.0 0
2014-11-26 7.566 12.494 9.0 -1
... ... ... ... ..
2016-03-14 -7.992 9.324 37.0 0
2016-03-14 -10.880 5.201 2.0 -1
2016-03-30 4.498 4.070 12.0 -1
2016-04-04 -6.281 5.618 33.0 0
2016-04-13 0.191 4.457 4.0 -1
2016-04-14 3.134 0.733 20.0 0
2016-04-15 4.693 1.162 5.5 -1
2016-04-15 4.693 1.162 5.5 -1
2016-04-29 4.615 -1.115 24.5 0
2016-04-29 4.615 -1.115 24.5 0
        The edge judge repeatedly applies this imbalance technique to bias the final outcome probabilities, so
        that the final decision accuracy itself becomes imbalanced in our favour. Imbalance is an important
        design idea in quant trading, because the target outcome is itself imbalanced (we want to win more
        money than we lose).
"""
        # rank profit_cg in the training features fiter.df, i.e. rank the win/loss values of the training
        # trades, and add the rank result as a new column of self.fiter.df
        # TODO: for now only profit_cg is used, not profit; profit should be folded into a combined rank weight
self.fiter.df['p_rk_cg'] = self.fiter.df['profit_cg'].rank()
"""
eg: self.fiter.df['p_rk_cg']
2014-09-24 19.0
2014-10-24 13.0
2014-10-29 35.5
2014-10-29 56.0
2014-10-29 43.0
2014-10-29 35.5
2014-11-03 28.0
2014-11-11 48.0
2014-11-12 23.0
2014-11-26 9.0
...
2016-03-14 37.0
2016-03-14 2.0
2016-03-30 12.0
2016-04-04 33.0
2016-04-13 4.0
2016-04-14 20.0
2016-04-15 5.5
2016-04-15 5.5
2016-04-29 24.5
2016-04-29 24.5
"""
        # K_CG_TOP_RATE = 0.236; because strategy wins and losses are imbalanced, win_top ends up placed
        # asymmetrically relative to loss_top, creating a probability edge for the later steps
win_top = len(self.fiter.df['profit_cg']) - len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
"""
eg:
len(self.fiter.df['profit_cg']) == 100
-> win_top = 100 - 100 * 0.236
-> win_top = 100 - 23.6
-> win_top = 76.4
"""
loss_top = len(self.fiter.df['profit_cg']) * K_CG_TOP_RATE
"""
eg:
len(self.fiter.df['profit_cg']) == 100
-> loss_top = 100 * 0.236
-> loss_top = 23.6
"""
        # add the new column 'rk' to self.fiter.df, initialised to EEdgeType.E_EEdge_NORMAL.value, i.e. 0
self.fiter.df['rk'] = EEdgeType.E_EEdge_NORMAL.value
"""
        Split the whole set into three segments according to win_top and loss_top, rk: -1, 0, 1
rk profit_cg p_rk_cg
2011-09-21 0 0.036216 58816.0
2011-09-21 1 0.046784 61581.0
2011-09-21 -1 -0.191184 1276.0
2011-09-21 0 -0.000428 43850.0
2011-09-21 0 0.001724 44956.0
"""
# noinspection PyTypeChecker
self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] > win_top, EEdgeType.E_STORE_TOP_WIN.value,
self.fiter.df['rk'])
# noinspection PyTypeChecker
self.fiter.df['rk'] = np.where(self.fiter.df['p_rk_cg'] < loss_top, EEdgeType.E_EEdge_TOP_LOSS.value,
self.fiter.df['rk'])
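    # Toy illustration of the split above (the numbers are assumptions, not real orders): with 100 trades
    # and K_CG_TOP_RATE == 0.236, win_top = 76.4 and loss_top = 23.6, so trades ranked above 76.4 get
    # rk == 1, trades ranked below 23.6 get rk == -1, and the remaining majority keeps rk == 0.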
def dump_file_fn(self):
"""
        Path rule for the edge judge's local cache:
ABuEnv.g_project_data_dir + 'ump/ump_edge_' + market_name + self.class_unique_id()
"""
        # TODO: if an existing judge would be overwritten, keep a backup and show a notice
unique_ump_name = 'ump/ump_edge_{}_{}'.format(self.market_name, self.class_unique_id())
return os.path.join(ABuEnv.g_project_data_dir, unique_ump_name)
def dump_clf(self):
"""
        Local serialization of the edge judge is much simpler than the main judge's dump_clf:
        pack self.fiter.df and self.fiter.x into one dict object df_x_dict
        and save it with ABuFileUtil.dump_pickle.
"""
df_x_dict = {'fiter_df': self.fiter.df, 'fiter_x': self.fiter.x}
"""
eg:df_x_dict
array([[ 3.378, 3.458, 3.458, 1.818],
[ 0.191, 2.889, 2.809, -1.089],
[ -2.026, 16.689, -0.761, 1.98 ],
[ -3.427, -11.956, -8.296, 6.507],
[ -2.915, 39.469, -6.043, 7.046],
[ -2.026, 16.689, -0.761, 1.98 ],
[ 0.103, 39.202, -4.614, 10.125],
[ 8.341, -9.45 , 0.73 , 12.397],
[ 3.963, 6.595, -7.524, 6.671],
....................................
[ 4.002, -10.559, -7.992, 9.324],
[ 0.129, -6.649, -10.88 , 5.201],
[ 13.121, -8.461, 4.498, 4.07 ],
[ 4.409, -33.097, -6.281, 5.618],
[ 6.603, -31.459, 0.191, 4.457],
[ 4.611, 18.428, 3.134, 0.733],
[ 4.238, -13.247, 4.693, 1.162],
[ 4.238, -13.247, 4.693, 1.162],
[ 1.445, 16.266, 4.615, -1.115],
[ 1.445, 16.266, 4.615, -1.115]])
"""
ABuFileUtil.dump_pickle(df_x_dict, self.dump_file_fn(), how='zero')
def predict(self, **kwargs):
"""
        Edge-judge trade decision function: fetch the cached df_x_dict from CachedUmpManager and decide
        whether to block the trade described by the kwargs feature values.
        The edge judge's predict() is relatively involved compared with the main judge; the rough idea is:
        1. pick the required features from the new trade and assemble them into x
        2. concatenate() x with the previously saved training-set data and standardize them together with scaler
        3. use sklearn.metrics.pairwise.pairwise_distances() to measure the distances between the input
           features and the rows of the training matrix
        4. take the TOP pairwise_distances() entries as seeds and continue with similarity matching
        5. sort the similarities from high to low and keep the trades above the retention threshold as the
           ones that finally get a vote
        6. the kept trades are considered the most similar to the new trade; they vote on the new trade
           using the previously built, imbalanced rk labels
        7. the final decision is only accepted when it exceeds a certain ratio, i.e. imbalance is applied again
        :param kwargs: keyword arguments matching the feature columns returned by the subclass's
                       get_predict_col(), e.g. buy_deg_ang42=3.378, buy_deg_ang60=3.458,
                       buy_deg_ang21=3.191, buy_deg_ang252=1.818
        :return: whether to block the trade described by the kwargs features,
                 EEdgeType: do not block: EEdgeType.E_EEdge_NORMAL or EEdgeType.E_STORE_TOP_WIN
                            block: EEdgeType.E_EEdge_TOP_LOSS
"""
        # always fetch the cached ump from CachedUmpManager; load_pickle is used when there is no cache yet
df_x_dict = AbuUmpBase.dump_clf_manager.get_ump(self)
        # select the feature columns from df_x_dict['fiter_df'].columns
feature_columns = df_x_dict['fiter_df'].columns.drop(['profit', 'profit_cg', 'p_rk_cg', 'rk'])
"""
eg: df_x_dict['fiter_df'].columns
Index(['profit', 'profit_cg', 'buy_deg_ang42', 'buy_deg_ang252',
'buy_deg_ang60', 'buy_deg_ang21', 'p_rk_cg', 'rk'], dtype='object')
drop(['profit', 'profit_cg', 'p_rk_cg', 'rk']
-> ['buy_deg_ang42', 'buy_deg_ang252', 'buy_deg_ang60', 'buy_deg_ang21']
"""
# eg, x: array([ 3.378, 3.458, 3.458, 1.818])
x = np.array([kwargs[col] for col in feature_columns])
x = x.reshape(1, -1)
        # concatenate the new x onto the previously saved matrix
con_x = np.concatenate((x, df_x_dict['fiter_x']), axis=0)
        # standardize the input x together with the newly assembled matrix con_x
con_x = self.scaler.fit_transform(con_x)
        # compare the input x, i.e. con_x[0], against the rest of the matrix with pairwise_distances
distances_cx = pairwise_distances(con_x[0].reshape(1, -1), con_x[1:],
metric='euclidean')
distances_cx = distances_cx[0]
"""
eg: distances_cx
array([[ 0. , 0.8432, 1.4371, 2.4178, 3.1302, 1.4371, 3.1774,
2.5422, 1.7465, 3.0011, 0.7233, 2.264 , 0.8279, 0.8279,
2.309 , 1.4878, 1.9396, 0.7438, 0.9731, 0.4494, 2.0755,
2.9762, 4.5869, 5.2029, 0.7362, 0.7362, 3.623 , 0.6105,
0.6105, 1.2288, 2.0991, 2.0991, 3.2272, 0.8599, 0.7419,
0.7419, 0.7804, 2.5241, 1.8116, 2.5373, 2.2742, 2.1726,
3.2738, 1.293 , 2.4555, 2.4555, 2.3358, 2.1673, 2.0187,
2.8637, 2.5066, 1.052 , 1.1481, 1.1481, 1.1175, 1.1175]])
"""
        # if the minimum distance exceeds the threshold, treat the match as invalid, K_DISTANCE_THRESHOLD = 0.668
if distances_cx.min() > K_DISTANCE_THRESHOLD:
return EEdgeType.E_EEdge_NORMAL
distances_sort = distances_cx.argsort()
"""
eg: distances_sort
array([ 0, 19, 28, 27, 10, 24, 25, 35, 34, 17, 36, 13, 12, 1, 33, 18, 51,
54, 55, 52, 53, 29, 43, 5, 2, 15, 8, 38, 16, 48, 20, 30, 31, 47,
41, 11, 40, 14, 46, 3, 45, 44, 50, 37, 39, 7, 49, 21, 9, 4, 6,
32, 42, 26, 22, 23])
"""
n_top = K_N_TOP_SEED if len(distances_cx) > K_N_TOP_SEED else len(distances_cx)
        # take the first 100 as seeds, preparing the data for the similarity-matching step
distances_sort = distances_sort[:n_top]
        # second-round similarity matching: run corr_xy between the input x, i.e. con_x[0], and the matrix
        # vectors recorded in distances_sort
similar_cx = {arg: corr_xy(con_x[0], con_x[arg + 1], g_similar_type) for arg in distances_sort}
"""
eg: similar_cx
{0: 1.0, 19: 0.9197507467964976, 28: 0.57289288329659238, 27: 0.57289288329659238,
10: 0.44603792013583493, 24: 0.4103293780402798, 25: 0.4103293780402798,
35: 0.22026514236282496, 34: 0.22026514236282496, 17: -0.24170074544552811,
36: 0.43863838382081699, 13: 0.16234971594751921, 12: 0.16234971594751921, 1: 0.92424298737490296,
33: 0.47818723914034433, 18: -0.17734957863273493, 51: 0.63704694680797502, 54: 0.75395818997353681,
55: 0.75395818997353681, 52: 0.6485413094804453, 53: 0.6485413094804453,
29: 0.89796883127042837, 43: 0.86342390437553329, 5: 0.12738173851484677,
2: 0.12738173851484677, 15: 0.53496775815355813, 8: -0.92624283913287053,
38: -0.52046967255944876, 16: -0.65837858483393186, 48: 0.26241267262766549,
20: 0.45007515315947716, 30: -0.78037071039800843, 31: -0.78037071039800843,
47: -0.99196576241088685, 41: 0.71286817166895511, 11: -0.57565781272205685,
40: -0.089683927257343574, 14: -0.49743962329463148, 46: -0.84622925585859421, 3: -0.82066914234853283,
45: 0.30735926720691314, 44: 0.30735926720691314, 50: 0.010871213734502339, 37: -0.65150765047066517,
39: -0.38809703338219459, 7: -0.57947244493007666, 49: -0.33103296960584466, 21: 0.69444344588208717,
9: -0.3435188573004419, 4: -0.39204446380766983, 6: -0.54996919528831723, 32: -0.9481034251744791,
42: 0.20829094732022327, 26: 0.9936229414412624, 22: -0.35972456962349542, 23: -0.085747705364200594}
"""
        # sort similarities from high to low
similar_sorted = sorted(zip(similar_cx.values(), similar_cx.keys()))[::-1]
"""
eg: similar_sorted
[(1.0, 0), (0.9936229414412624, 26), (0.92424298737490296, 1), (0.9197507467964976, 19), (
0.89796883127042837, 29), (0.86342390437553329, 43), (0.75395818997353681, 55), (0.75395818997353681, 54),
(0.71286817166895511, 41), (0.69444344588208717, 21), (0.6485413094804453, 53), (0.6485413094804453, 52),
(0.63704694680797502, 51), (0.57289288329659238, 28), (0.57289288329659238, 27), (0.53496775815355813, 15),
(0.47818723914034433, 33), (0.45007515315947716, 20), (0.44603792013583493, 10), (0.43863838382081699, 36),
(0.4103293780402798, 25), (0.4103293780402798, 24), (0.30735926720691314, 45), (0.30735926720691314, 44),
(0.26241267262766549, 48), (0.22026514236282496, 35), (0.22026514236282496, 34), (0.20829094732022327, 42),
(0.16234971594751921, 13), (0.16234971594751921, 12), (0.12738173851484677, 5), (0.12738173851484677, 2),
(0.010871213734502339, 50), (-0.085747705364200594, 23), (-0.089683927257343574, 40),
(-0.17734957863273493, 18), (-0.24170074544552811, 17), (-0.33103296960584466, 49),
(-0.3435188573004419, 9), (-0.35972456962349542, 22), (-0.38809703338219459, 39),
(-0.39204446380766983, 4), (-0.49743962329463148, 14), (-0.52046967255944876, 38),
(-0.54996919528831723, 6), (-0.57565781272205685, 11), (-0.57947244493007666, 7),
(-0.65150765047066517, 37), (-0.65837858483393186, 16), (-0.78037071039800843, 31),
(-0.78037071039800843, 30), (-0.82066914234853283, 3), (-0.84622925585859421, 46),
(-0.92624283913287053, 8), (-0.9481034251744791, 32), (-0.99196576241088685, 47)]
"""
        # keep only similarities above the threshold K_SIMILAR_THRESHOLD as the ones that finally get a vote
similar_filters = list(filter(lambda sm: sm[0] > K_SIMILAR_THRESHOLD, similar_sorted))
"""
eg: similar_filters
[(1.0, 0), (0.9936229414412624, 26), (0.92424298737490296, 1), (0.9197507467964976, 19)]
"""
if len(similar_filters) < int(n_top * 0.1):
            # too few voters: 0.1 of the initial seed count n_top is the threshold, treat as invalid,
            # e.g. int(100 * 0.1) == 10
return EEdgeType.E_EEdge_NORMAL
top_loss_cluster_cnt = 0
top_win_cluster_cnt = 0
        # because win_top is imbalanced (see gmm_component_filter), top_win_cluster_cnt > top_loss_cluster_cnt
        # is the more likely outcome
for similar in similar_filters:
"""
eg:
similar: (0.9936229414412624, 26)
order_ind = similar[1] = 26
similar_val = similar[0] = 0.9936229414412624
"""
order_ind = similar[1]
similar_val = similar[0]
            # use order_ind to fetch the rk value of the voting trade
rk = df_x_dict['fiter_df'].iloc[order_ind]['rk']
            # check which class this most-similar trade falls into and judge the edge accordingly
if rk == -1:
                # weight by similar_val, e.g. top_loss_cluster_cnt += 1 * 0.9936229414412624
top_loss_cluster_cnt += 1 * similar_val
elif rk == 1:
top_win_cluster_cnt += 1 * similar_val
        # the final vote is only accepted when it exceeds a certain ratio, i.e. for a contested vote one side
        # must hold a clear relative advantage
if int(top_win_cluster_cnt * K_EDGE_JUDGE_RATE) > top_loss_cluster_cnt:
"""
eg: top_win_cluster_cnt = 100
top_loss_cluster_cnt = 50
int(top_win_cluster_cnt * K_EDGE_JUDGE_RATE) == 62
62 > 50 -> EEdgeType.E_STORE_TOP_WIN
"""
return EEdgeType.E_STORE_TOP_WIN
elif int(top_loss_cluster_cnt * K_EDGE_JUDGE_RATE) > top_win_cluster_cnt:
"""
eg: top_loss_cluster_cnt = 100
top_win_cluster_cnt = 50
int(top_loss_cluster_cnt * K_EDGE_JUDGE_RATE) == 62
62 > 50 -> EEdgeType.E_EEdge_TOP_LOSS
"""
            # top_win_cluster_cnt > top_loss_cluster_cnt already carries a probabilistic edge; multiplying by
            # K_EDGE_JUDGE_RATE widens that edge further
return EEdgeType.E_EEdge_TOP_LOSS
return EEdgeType.E_EEdge_NORMAL
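# A hedged usage sketch of the workflow implemented above. The concrete subclass name
# (AbuUmpEdgeDeg), the orders_pd_train variable and the feature keyword names are illustrative
# assumptions; only the ump_edge_clf_dump/predict flow comes from this module.
#
#   edge = AbuUmpEdgeDeg.ump_edge_clf_dump(orders_pd_train, show_info=True)
#   decision = edge.predict(buy_deg_ang42=3.378, buy_deg_ang252=3.458,
#                           buy_deg_ang60=3.458, buy_deg_ang21=1.818)
#   if decision == EEdgeType.E_EEdge_TOP_LOSS:
#       pass  # the edge judge vetoes (intercepts) this buy signal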
| gpl-3.0 |
mrahnis/orangery | orangery/cli/segment.py | 1 | 2695 | import sys
import logging
import time
import json
import click
import pandas as pnd
import matplotlib.pyplot as plt
import orangery as o
@click.command(options_metavar='<options>')
@click.argument('areas_f', nargs=1, type=click.Path(exists=True), metavar='<areas_file>')
@click.argument('materials_f', nargs=1, type=click.Path(exists=True), metavar='<materials_file>')
def segment(areas_f, materials_f):
"""Prompt the user to assign materials to polygon areas listed in a csv file.
The segment subcommand takes two arguments: A path to a csv file listing cut-and-fill polygon areas and a path to a JSON file listing possible materials.
The csv file listing the cut-and-fill polygon areas is created with the --save option of the cutfill subcommand.
\b
Example:
orangery segment XS-3-20130514-20170609.csv materials.json
"""
def __assign_material(p, low, high):
prompt = 'Enter a material no. for area {0}: '.format(p)
err = 'Input must be an integer number between {0} and {1}.'.format(low, high)
while True:
try:
m = int(input(prompt))
if low <= m <= high:
return m
else:
print(err)
except ValueError:
print(err)
areas = pnd.read_csv(areas_f, index_col=0)
# materials list and array to track assignment of material to polygon
materials = json.load(open(materials_f, 'r'))
materials = materials['materials']
assignments = []
print('\n')
print('Areas')
print('--------------------')
print(areas)
print('-------------------')
print('\n')
print("No. Material")
print('-------------------')
for i, material in enumerate(materials):
print(i, " ", material['name'])
print('\n')
print("Assign a material, by number, to each area")
print('-------------------')
for i, area in areas.iterrows():
m = __assign_material(i, 0, len(materials)-1)
assignments.append([i, m, materials[m]['name'], materials[m]['density'], materials[m]['fines']])
assignments_df = pnd.DataFrame(assignments, columns=['polygon', 'material', 'name', 'density', 'fines'])
result = assignments_df.join(areas)
result['mass_fines'] = result['density']*result['fines']/100*result['area']
print('\n')
print('Results ')
print('-------------------')
print(result)
print('-------------------')
print('Net change in mass of fines: ', result['mass_fines'].sum())
print('\n')
input("Press Enter to exit")
outfile = areas_f.split('.')[0] + '-sgmt.' + areas_f.split('.')[1]
result.to_csv(outfile) | bsd-3-clause |
Aasmi/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 32 | 6044 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
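def _nmf_reconstruction_sketch():
    # Illustrative sketch only, not exercised by the tests above: NMF approximates a non-negative
    # matrix A with non-negative factors W and H, and reconstruction_err_ is the Frobenius norm of
    # (A - WH), the same quantity test_sparse_input checks explicitly.
    A = np.abs(random_state.randn(6, 5))
    model = nmf.ProjectedGradientNMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(A)  # shape (6, 2), non-negative
    H = model.components_       # shape (2, 5), non-negative
    return linalg.norm(A - np.dot(W, H)), model.reconstruction_err_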
| bsd-3-clause |
xavierwu/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
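# A hedged, stand-alone sketch of the density-estimation idea used above, separate from the
# plotting code: fit a OneClassSVM on standardized environmental features at the presence points
# and read decision_function as a relative habitat-suitability score. The toy arrays below are
# assumptions for illustration only.
def _one_class_density_sketch():
    rng = np.random.RandomState(0)
    presence = rng.normal(size=(100, 14))  # stand-in for standardized coverage features
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit(presence)
    candidate_sites = rng.normal(size=(5, 14))
    return clf.decision_function(candidate_sites)  # higher -> more similar to presence points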
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
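def _svmlight_format_sketch():
    # Illustrative sketch of the svmlight/libsvm line format exercised above:
    # "<label> [qid:<n>] <index>:<value> ..." with one sample per line and only the non-zero
    # features written out. The toy matrix below is an assumption chosen for illustration.
    X = [[0.0, 2.5, 0.0], [1.0, 0.0, 3.0]]
    y = [1, -1]
    f = BytesIO()
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    return f.read()  # roughly b'1 1:2.5\n-1 0:1 2:3\n', up to exact float formatting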
| bsd-3-clause |
pinkavaj/rstt | gr-rstt/python/nle2.py | 1 | 4939 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
import sys
class Src:
def __init__(self, fname, block_size, navg = 1):
self.offs = 0
self.navg = navg
self.block_size = block_size
self.data = np.fromfile(fname, dtype=np.float32)
l = divmod(len(self.data), block_size)[0]
self.data = self.data[0:l*block_size]
self.data = self.data.reshape(l, block_size)
def read(self):
navg = self.navg
# do frame averaging"
data = np.zeros(self.block_size)
while navg > 0 and self.offs < len(self.data):
data += self.data[self.offs]
self.offs += 1
navg -= 1
if navg:
return None
return data / self.navg
class Extreme:
def __init__(self, data, mean_rel):
"""Data should be integral of signal power value,
mean_rel should be relative mean value, see Split.set_mean for details."""
idx = self.idx = 0
self.val = 0
while idx < len(data):
val = data[idx] - data[0] - mean_rel * idx
if val > self.val:
self.val = val
self.idx = idx
elif -val > self.val:
self.val = -val
self.idx = idx
idx += 1
class Split:
def __init__(self, start, _len):
self.start = start
self.len = _len
def __str__(self):
return "start = %f; len = %f; mean_rel = %f;" % (self.start, self.len, self.mean_rel, )
def get_mean(self, data):
return (self.mean_rel * (self.len - 1) + data[self.start]) / self.len
def set_mean(self, data):
"""Set relative mean value for data in range defined by Split.
Data should be integrated power value of signal."""
if self.len > 1:
l = self.len - 1
self.mean_rel = (data[self.start + l] - data[self.start]) / l
else:
self.mean_rel = 0.
def set_extreme(self, data):
"""Find new extreme."""
self.extreme = Extreme(self.data(data), self.mean_rel)
def data(self, data):
return data[self.start:self.start+self.len]
class Show:
"""Noise level estimation. Input is vector of FFT(1024) series."""
def __init__(self, src):
self.src = src
def run2(self, noise_pct = 0.33, noise_w = 0.05, threshold = 3):
d = self.src.read()
noise_pct = int(self.src.block_size * noise_pct)
noise_w = int(self.src.block_size * noise_w)
        while d is not None and len(d):
# plot: original signal
offs = int(len(d) / 2)
x = range(0 - offs, len(d) - offs)
plt.plot(x, d)
# plot: ln(original signal)
d_log = [np.log(p) for p in d]
min_ = max(d_log)
for p in d_log:
if p < min_ and np.isfinite(p):
min_ = p
d_log = [p if np.isfinite(p) else min_ for p in d_log ]
#plt.plot(x, d_log)
self.write_signal('out', d_log)
# moving average and moving sigma
mean = [sum(d_log[0:noise_w]), ]
d_log2 = [x*x for x in d_log]
mean2 = [sum(d_log2[0:noise_w]), ]
for i in range(noise_w, len(d_log)):
ii = i - noise_w
mean.append(mean[ii] - d_log[ii] + d_log[i])
mean2.append(mean2[ii] - d_log2[ii] + d_log2[i])
mean = [i/noise_w for i in mean]
mean2 = [i/noise_w for i in mean2]
# signal dispersion around moving average
s = []
for i in range(0, len(mean)):
s.append(np.sqrt(mean2[i] - mean[i]**2))
#s_plt = [max(s),] * int(noise_w/2) + s
#s_plt = s_plt + [max(s), ] * (len(x) - len(s))
#plt.plot(x, s_plt)
s.sort()
s = s[:noise_pct]
s = sum(s) / len(s) * threshold
mean.sort()
#plt.plot(range(0, len(mean)), mean)
mean = mean[:noise_pct]
mean = sum(mean) / len(mean)
#plt.plot(x, [mean - s, ] * len(d_log))
#plt.plot(x, [mean, ] * len(d_log))
#plt.plot(x, [mean + s, ] * len(d_log))
print(mean - s, mean, mean + s)
s_lo = [np.exp(mean - s), ] * len(d_log)
s_m = [np.exp(mean), ] * len(d_log)
s_hi = [np.exp(mean + s), ] * len(d_log)
plt.plot(x, s_lo)
plt.plot(x, s_m)
plt.plot(x, s_hi)
plt.show()
            plt.close()
            # advance to the next averaged block; read() returns None once the input is exhausted
            d = self.src.read()
def write_signal(self, fname, data):
with open('out', 'w') as f:
i = 0
while i < len(data):
f.write("%.4f, " % data[i])
i += 1
if i % 8 == 0 and i != 0:
f.write("\n")
if __name__ == '__main__':
s = Show(Src(sys.argv[1], 1024, 2**11))
s.run2()
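# A hedged sketch of how an input file for Src could be produced: Src expects raw float32
# power-spectrum frames of block_size bins written back to back. The file name, frame count and
# use of random data below are assumptions for illustration only.
#
#   frames = np.abs(np.random.randn(2**12, 1024)).astype(np.float32)
#   frames.tofile('spectrum.f32')
#   Show(Src('spectrum.f32', 1024, 2**11)).run2()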
| apache-2.0 |
gfyoung/pandas | pandas/tests/arrays/boolean/test_logical.py | 7 | 8486 | import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.tests.extension.base import BaseOpsUtil
class TestLogicalOps(BaseOpsUtil):
def test_numpy_scalars_ok(self, all_logical_operators):
a = pd.array([True, False, None], dtype="boolean")
op = getattr(a, all_logical_operators)
tm.assert_extension_array_equal(op(True), op(np.bool_(True)))
tm.assert_extension_array_equal(op(False), op(np.bool_(False)))
def get_op_from_name(self, op_name):
short_opname = op_name.strip("_")
short_opname = short_opname if "xor" in short_opname else short_opname + "_"
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
def test_empty_ok(self, all_logical_operators):
a = pd.array([], dtype="boolean")
op_name = all_logical_operators
result = getattr(a, op_name)(True)
tm.assert_extension_array_equal(a, result)
result = getattr(a, op_name)(False)
tm.assert_extension_array_equal(a, result)
# FIXME: dont leave commented-out
# TODO: pd.NA
# result = getattr(a, op_name)(pd.NA)
# tm.assert_extension_array_equal(a, result)
def test_logical_length_mismatch_raises(self, all_logical_operators):
op_name = all_logical_operators
a = pd.array([True, False, None], dtype="boolean")
msg = "Lengths must match to compare"
with pytest.raises(ValueError, match=msg):
getattr(a, op_name)([True, False])
with pytest.raises(ValueError, match=msg):
getattr(a, op_name)(np.array([True, False]))
with pytest.raises(ValueError, match=msg):
getattr(a, op_name)(pd.array([True, False], dtype="boolean"))
def test_logical_nan_raises(self, all_logical_operators):
op_name = all_logical_operators
a = pd.array([True, False, None], dtype="boolean")
msg = "Got float instead"
with pytest.raises(TypeError, match=msg):
getattr(a, op_name)(np.nan)
@pytest.mark.parametrize("other", ["a", 1])
def test_non_bool_or_na_other_raises(self, other, all_logical_operators):
a = pd.array([True, False], dtype="boolean")
with pytest.raises(TypeError, match=str(type(other).__name__)):
getattr(a, all_logical_operators)(other)
def test_kleene_or(self):
# A clear test of behavior.
a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
b = pd.array([True, False, None] * 3, dtype="boolean")
result = a | b
expected = pd.array(
[True, True, True, True, False, None, True, None, None], dtype="boolean"
)
tm.assert_extension_array_equal(result, expected)
result = b | a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
)
tm.assert_extension_array_equal(
b, pd.array([True, False, None] * 3, dtype="boolean")
)
@pytest.mark.parametrize(
"other, expected",
[
(pd.NA, [True, None, None]),
(True, [True, True, True]),
(np.bool_(True), [True, True, True]),
(False, [True, False, None]),
(np.bool_(False), [True, False, None]),
],
)
def test_kleene_or_scalar(self, other, expected):
# TODO: test True & False
a = pd.array([True, False, None], dtype="boolean")
result = a | other
expected = pd.array(expected, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = other | a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True, False, None], dtype="boolean")
)
def test_kleene_and(self):
# A clear test of behavior.
a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
b = pd.array([True, False, None] * 3, dtype="boolean")
result = a & b
expected = pd.array(
[True, False, None, False, False, False, None, False, None], dtype="boolean"
)
tm.assert_extension_array_equal(result, expected)
result = b & a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
)
tm.assert_extension_array_equal(
b, pd.array([True, False, None] * 3, dtype="boolean")
)
@pytest.mark.parametrize(
"other, expected",
[
(pd.NA, [None, False, None]),
(True, [True, False, None]),
(False, [False, False, False]),
(np.bool_(True), [True, False, None]),
(np.bool_(False), [False, False, False]),
],
)
def test_kleene_and_scalar(self, other, expected):
a = pd.array([True, False, None], dtype="boolean")
result = a & other
expected = pd.array(expected, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = other & a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True, False, None], dtype="boolean")
)
def test_kleene_xor(self):
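        # Kleene XOR: any NA operand makes the result NA.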
a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
b = pd.array([True, False, None] * 3, dtype="boolean")
result = a ^ b
expected = pd.array(
[False, True, None, True, False, None, None, None, None], dtype="boolean"
)
tm.assert_extension_array_equal(result, expected)
result = b ^ a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
)
tm.assert_extension_array_equal(
b, pd.array([True, False, None] * 3, dtype="boolean")
)
@pytest.mark.parametrize(
"other, expected",
[
(pd.NA, [None, None, None]),
(True, [False, True, None]),
(np.bool_(True), [False, True, None]),
(np.bool_(False), [True, False, None]),
],
)
def test_kleene_xor_scalar(self, other, expected):
a = pd.array([True, False, None], dtype="boolean")
result = a ^ other
expected = pd.array(expected, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = other ^ a
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
tm.assert_extension_array_equal(
a, pd.array([True, False, None], dtype="boolean")
)
@pytest.mark.parametrize("other", [True, False, pd.NA, [True, False, None] * 3])
def test_no_masked_assumptions(self, other, all_logical_operators):
# The logical operations should not assume that masked values are False!
a = pd.arrays.BooleanArray(
np.array([True, True, True, False, False, False, True, False, True]),
np.array([False] * 6 + [True, True, True]),
)
b = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean")
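        # a and b hold the same values; a's masked slots just contain arbitrary data.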
if isinstance(other, list):
other = pd.array(other, dtype="boolean")
result = getattr(a, all_logical_operators)(other)
expected = getattr(b, all_logical_operators)(other)
tm.assert_extension_array_equal(result, expected)
if isinstance(other, BooleanArray):
other._data[other._mask] = True
a._data[a._mask] = False
result = getattr(a, all_logical_operators)(other)
expected = getattr(b, all_logical_operators)(other)
tm.assert_extension_array_equal(result, expected)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
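        # average latency of a single predict() call over the test set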
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/preprocessing/label.py | 8 | 28313 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belongs or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The multilabel_ attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The indicator_matrix_ attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute indicator_matrix_ is deprecated and will be "
"removed in 0.17. Use 'y_type_ == 'multilabel-indicator'' "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute multilabel_ is deprecated and will be removed "
"in 0.17. Use 'y_type_.startswith('multilabel')' "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the 1-of-K coding
        scheme.
Parameters
----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification. Sparse matrix can be CSR,
            CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
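        # use a temporary sentinel for the positive label; it is switched back to 0 below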
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
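        # samples whose label is not in classes get no entry, i.e. an all-zero row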
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.argsort(classes)
Y = Y[:, indices]
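    # binary targets keep only the column for the second (positive) class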
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i+1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError("The number of classes is not equal to the number of "
                         "dimensions of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
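        # looking up an unseen label assigns it the next column index (the current mapping size)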
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
tswast/google-cloud-python | bigquery/tests/unit/test_job.py | 1 | 231908 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent
import copy
import json
import textwrap
import unittest
import freezegun
import mock
import pytest
import requests
from six.moves import http_client
try:
import pandas
except (ImportError, AttributeError): # pragma: NO COVER
pandas = None
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
try:
from google.cloud import bigquery_storage_v1beta1
except (ImportError, AttributeError): # pragma: NO COVER
bigquery_storage_v1beta1 = None
try:
from tqdm import tqdm
except (ImportError, AttributeError): # pragma: NO COVER
tqdm = None
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project="test-project", connection=None):
from google.cloud.bigquery.client import Client
if connection is None:
connection = _make_connection()
client = Client(project=project, credentials=_make_credentials(), _http=object())
client._connection = connection
return client
def _make_connection(*responses):
import google.cloud.bigquery._http
from google.cloud.exceptions import NotFound
mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")]
return mock_conn
def _make_job_resource(
creation_time_ms=1437767599006,
started_time_ms=1437767600007,
ended_time_ms=1437767601008,
started=False,
ended=False,
etag="abc-def-hjk",
endpoint="https://bigquery.googleapis.com",
job_type="load",
job_id="a-random-id",
project_id="some-project",
user_email="bq-user@example.com",
):
resource = {
"configuration": {job_type: {}},
"statistics": {"creationTime": creation_time_ms, job_type: {}},
"etag": etag,
"id": "{}:{}".format(project_id, job_id),
"jobReference": {"projectId": project_id, "jobId": job_id},
"selfLink": "{}/bigquery/v2/projects/{}/jobs/{}".format(
endpoint, project_id, job_id
),
"user_email": user_email,
}
if started or ended:
resource["statistics"]["startTime"] = started_time_ms
if ended:
resource["statistics"]["endTime"] = ended_time_ms
if job_type == "query":
resource["configuration"]["query"]["destinationTable"] = {
"projectId": project_id,
"datasetId": "_temp_dataset",
"tableId": "_temp_table",
}
return resource
class Test__error_result_to_exception(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud.bigquery import job
return job._error_result_to_exception(*args, **kwargs)
def test_simple(self):
error_result = {"reason": "invalid", "message": "bad request"}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.BAD_REQUEST)
self.assertTrue(exception.message.startswith("bad request"))
self.assertIn(error_result, exception.errors)
def test_missing_reason(self):
error_result = {}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.INTERNAL_SERVER_ERROR)
class Test_JobReference(unittest.TestCase):
JOB_ID = "job-id"
PROJECT = "test-project-123"
LOCATION = "us-central"
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._JobReference
def _make_one(self, job_id, project, location):
return self._get_target_class()(job_id, project, location)
def test_ctor(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
self.assertEqual(job_ref.location, self.LOCATION)
def test__to_api_repr(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(
job_ref._to_api_repr(),
{
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": self.LOCATION,
},
)
def test_from_api_repr(self):
api_repr = {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": self.LOCATION,
}
job_ref = self._get_target_class()._from_api_repr(api_repr)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
self.assertEqual(job_ref.location, self.LOCATION)
class Test_AsyncJob(unittest.TestCase):
JOB_ID = "job-id"
PROJECT = "test-project-123"
LOCATION = "us-central"
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._AsyncJob
def _make_one(self, job_id, client):
return self._get_target_class()(job_id, client)
def _make_derived_class(self):
class Derived(self._get_target_class()):
_JOB_TYPE = "derived"
return Derived
def _make_derived(self, job_id, client):
return self._make_derived_class()(job_id, client)
@staticmethod
def _job_reference(job_id, project, location):
from google.cloud.bigquery import job
return job._JobReference(job_id, project, location)
def test_ctor_w_bare_job_id(self):
import threading
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertIsNone(job.location)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}},
)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
)
def test_ctor_w_job_ref(self):
import threading
other_project = "other-project-234"
client = _make_client(project=other_project)
job_ref = self._job_reference(self.JOB_ID, self.PROJECT, self.LOCATION)
job = self._make_one(job_ref, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertEqual(job.location, self.LOCATION)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{
"jobReference": {
"projectId": self.PROJECT,
"location": self.LOCATION,
"jobId": self.JOB_ID,
}
},
)
self.assertFalse(job._result_set)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
)
def test__require_client_w_none(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(None), client)
def test__require_client_w_other(self):
client = _make_client(project=self.PROJECT)
other = object()
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(other), other)
def test_job_type(self):
client = _make_client(project=self.PROJECT)
derived = self._make_derived(self.JOB_ID, client)
self.assertEqual(derived.job_type, "derived")
def test_parent_job_id(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.parent_job_id)
job._properties["statistics"] = {"parentJobId": "parent-job-123"}
self.assertEqual(job.parent_job_id, "parent-job-123")
def test_script_statistics(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.script_statistics)
job._properties["statistics"] = {
"scriptStatistics": {
"evaluationKind": "EXPRESSION",
"stackFrames": [
{
"startLine": 5,
"startColumn": 29,
"endLine": 9,
"endColumn": 14,
"text": "QUERY TEXT",
}
],
}
}
script_stats = job.script_statistics
self.assertEqual(script_stats.evaluation_kind, "EXPRESSION")
stack_frames = script_stats.stack_frames
self.assertEqual(len(stack_frames), 1)
stack_frame = stack_frames[0]
self.assertIsNone(stack_frame.procedure_id)
self.assertEqual(stack_frame.start_line, 5)
self.assertEqual(stack_frame.start_column, 29)
self.assertEqual(stack_frame.end_line, 9)
self.assertEqual(stack_frame.end_column, 14)
self.assertEqual(stack_frame.text, "QUERY TEXT")
def test_num_child_jobs(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.num_child_jobs, 0)
job._properties["statistics"] = {"numChildJobs": "17"}
self.assertEqual(job.num_child_jobs, 17)
def test_labels_miss(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.labels, {})
def test_labels_update_in_place(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
labels = job.labels
labels["foo"] = "bar" # update in place
self.assertEqual(job.labels, {"foo": "bar"})
def test_labels_hit(self):
labels = {"foo": "bar"}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["labels"] = labels
self.assertEqual(job.labels, labels)
def test_etag(self):
etag = "ETAG-123"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.etag)
job._properties["etag"] = etag
self.assertEqual(job.etag, etag)
def test_self_link(self):
self_link = "https://api.example.com/123"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.self_link)
job._properties["selfLink"] = self_link
self.assertEqual(job.self_link, self_link)
def test_user_email(self):
user_email = "user@example.com"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.user_email)
job._properties["user_email"] = user_email
self.assertEqual(job.user_email, user_email)
@staticmethod
def _datetime_and_millis():
import datetime
import pytz
from google.cloud._helpers import _millis
now = datetime.datetime.utcnow().replace(
microsecond=123000, tzinfo=pytz.UTC # stats timestamps have ms precision
)
return now, _millis(now)
def test_created(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.created)
stats = job._properties["statistics"] = {}
self.assertIsNone(job.created)
stats["creationTime"] = millis
self.assertEqual(job.created, now)
def test_started(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.started)
stats = job._properties["statistics"] = {}
self.assertIsNone(job.started)
stats["startTime"] = millis
self.assertEqual(job.started, now)
def test_ended(self):
now, millis = self._datetime_and_millis()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.ended)
stats = job._properties["statistics"] = {}
self.assertIsNone(job.ended)
stats["endTime"] = millis
self.assertEqual(job.ended, now)
def test__job_statistics(self):
statistics = {"foo": "bar"}
client = _make_client(project=self.PROJECT)
derived = self._make_derived(self.JOB_ID, client)
self.assertEqual(derived._job_statistics(), {})
stats = derived._properties["statistics"] = {}
self.assertEqual(derived._job_statistics(), {})
stats["derived"] = statistics
self.assertEqual(derived._job_statistics(), statistics)
def test_error_result(self):
error_result = {
"debugInfo": "DEBUG INFO",
"location": "LOCATION",
"message": "MESSAGE",
"reason": "REASON",
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.error_result)
status = job._properties["status"] = {}
self.assertIsNone(job.error_result)
status["errorResult"] = error_result
self.assertEqual(job.error_result, error_result)
def test_errors(self):
errors = [
{
"debugInfo": "DEBUG INFO",
"location": "LOCATION",
"message": "MESSAGE",
"reason": "REASON",
}
]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.errors)
status = job._properties["status"] = {}
self.assertIsNone(job.errors)
status["errors"] = errors
self.assertEqual(job.errors, errors)
def test_state(self):
state = "STATE"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.state)
status = job._properties["status"] = {}
self.assertIsNone(job.state)
status["state"] = state
self.assertEqual(job.state, state)
def test__scrub_local_properties(self):
before = {"foo": "bar"}
resource = before.copy()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._scrub_local_properties(resource) # no raise
self.assertEqual(resource, before)
def test__copy_configuration_properties(self):
before = {"foo": "bar"}
resource = before.copy()
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job._copy_configuration_properties(resource)
self.assertEqual(resource, before)
def _set_properties_job(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._scrub_local_properties = mock.Mock()
job._copy_configuration_properties = mock.Mock()
job._set_future_result = mock.Mock()
job._properties = {
"jobReference": job._properties["jobReference"],
"foo": "bar",
}
return job
def test__set_properties_no_stats(self):
config = {"test": True}
resource = {"configuration": config}
job = self._set_properties_job()
job._set_properties(resource)
self.assertEqual(job._properties, resource)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_creation_time(self):
now, millis = self._datetime_and_millis()
config = {"test": True}
stats = {"creationTime": str(millis)}
resource = {"configuration": config, "statistics": stats}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned["statistics"]["creationTime"] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_start_time(self):
now, millis = self._datetime_and_millis()
config = {"test": True}
stats = {"startTime": str(millis)}
resource = {"configuration": config, "statistics": stats}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned["statistics"]["startTime"] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__set_properties_w_end_time(self):
now, millis = self._datetime_and_millis()
config = {"test": True}
stats = {"endTime": str(millis)}
resource = {"configuration": config, "statistics": stats}
job = self._set_properties_job()
job._set_properties(resource)
cleaned = copy.deepcopy(resource)
cleaned["statistics"]["endTime"] = float(millis)
self.assertEqual(job._properties, cleaned)
job._scrub_local_properties.assert_called_once_with(resource)
job._copy_configuration_properties.assert_called_once_with(config)
def test__get_resource_config_missing_job_ref(self):
resource = {}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_job_id(self):
resource = {"jobReference": {}}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_configuration(self):
resource = {"jobReference": {"jobId": self.JOB_ID}}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_missing_config_type(self):
resource = {"jobReference": {"jobId": self.JOB_ID}, "configuration": {}}
klass = self._make_derived_class()
with self.assertRaises(KeyError):
klass._get_resource_config(resource)
def test__get_resource_config_ok(self):
derived_config = {"foo": "bar"}
resource = {
"jobReference": {"jobId": self.JOB_ID},
"configuration": {"derived": derived_config},
}
klass = self._make_derived_class()
job_id, config = klass._get_resource_config(resource)
self.assertEqual(job_id, self.JOB_ID)
self.assertEqual(config, {"derived": derived_config})
def test__build_resource(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job._build_resource()
def test_to_api_repr(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
with self.assertRaises(NotImplementedError):
job.to_api_repr()
def test__begin_already(self):
job = self._set_properties_job()
job._properties["status"] = {"state": "WHATEVER"}
with self.assertRaises(ValueError):
job._begin()
def test__begin_defaults(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
job = self._set_properties_job()
builder = job.to_api_repr = mock.Mock()
builder.return_value = resource
call_api = job._client._call_api = mock.Mock()
call_api.return_value = resource
job._begin()
call_api.assert_called_once_with(
DEFAULT_RETRY,
method="POST",
path="/projects/{}/jobs".format(self.PROJECT),
data=resource,
timeout=None,
)
self.assertEqual(job._properties, resource)
def test__begin_explicit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = "other-project-234"
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
job = self._set_properties_job()
builder = job.to_api_repr = mock.Mock()
builder.return_value = resource
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
job._begin(client=client, retry=retry, timeout=7.5)
call_api.assert_called_once_with(
retry,
method="POST",
path="/projects/{}/jobs".format(self.PROJECT),
data=resource,
timeout=7.5,
)
self.assertEqual(job._properties, resource)
def test_exists_defaults_miss(self):
from google.cloud.exceptions import NotFound
from google.cloud.bigquery.retry import DEFAULT_RETRY
job = self._set_properties_job()
job._properties["jobReference"]["location"] = self.LOCATION
call_api = job._client._call_api = mock.Mock()
call_api.side_effect = NotFound("testing")
self.assertFalse(job.exists())
call_api.assert_called_once_with(
DEFAULT_RETRY,
method="GET",
path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
query_params={"fields": "id", "location": self.LOCATION},
timeout=None,
)
def test_exists_explicit_hit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = "other-project-234"
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
job = self._set_properties_job()
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
self.assertTrue(job.exists(client=client, retry=retry))
call_api.assert_called_once_with(
retry,
method="GET",
path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
query_params={"fields": "id"},
timeout=None,
)
def test_exists_w_timeout(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
PATH = "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID)
job = self._set_properties_job()
call_api = job._client._call_api = mock.Mock()
job.exists(timeout=7.5)
call_api.assert_called_once_with(
DEFAULT_RETRY,
method="GET",
path=PATH,
query_params={"fields": "id"},
timeout=7.5,
)
def test_reload_defaults(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
job = self._set_properties_job()
job._properties["jobReference"]["location"] = self.LOCATION
call_api = job._client._call_api = mock.Mock()
call_api.return_value = resource
job.reload()
call_api.assert_called_once_with(
DEFAULT_RETRY,
method="GET",
path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
query_params={"location": self.LOCATION},
timeout=None,
)
self.assertEqual(job._properties, resource)
def test_reload_explicit(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
other_project = "other-project-234"
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
job = self._set_properties_job()
client = _make_client(project=other_project)
call_api = client._call_api = mock.Mock()
call_api.return_value = resource
retry = DEFAULT_RETRY.with_deadline(1)
job.reload(client=client, retry=retry, timeout=4.2)
call_api.assert_called_once_with(
retry,
method="GET",
path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID),
query_params={},
timeout=4.2,
)
self.assertEqual(job._properties, resource)
def test_cancel_defaults(self):
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
response = {"job": resource}
job = self._set_properties_job()
job._properties["jobReference"]["location"] = self.LOCATION
connection = job._client._connection = _make_connection(response)
self.assertTrue(job.cancel())
connection.api_request.assert_called_once_with(
method="POST",
path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID),
query_params={"location": self.LOCATION},
timeout=None,
)
self.assertEqual(job._properties, resource)
def test_cancel_explicit(self):
other_project = "other-project-234"
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
response = {"job": resource}
job = self._set_properties_job()
client = _make_client(project=other_project)
connection = client._connection = _make_connection(response)
self.assertTrue(job.cancel(client=client, timeout=7.5))
connection.api_request.assert_called_once_with(
method="POST",
path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID),
query_params={},
timeout=7.5,
)
self.assertEqual(job._properties, resource)
def test_cancel_w_custom_retry(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
api_path = "/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID)
resource = {
"jobReference": {
"jobId": self.JOB_ID,
"projectId": self.PROJECT,
"location": None,
},
"configuration": {"test": True},
}
response = {"job": resource}
job = self._set_properties_job()
api_request_patcher = mock.patch.object(
job._client._connection, "api_request", side_effect=[ValueError, response],
)
retry = DEFAULT_RETRY.with_deadline(1).with_predicate(
lambda exc: isinstance(exc, ValueError)
)
with api_request_patcher as fake_api_request:
result = job.cancel(retry=retry, timeout=7.5)
self.assertTrue(result)
self.assertEqual(job._properties, resource)
self.assertEqual(
fake_api_request.call_args_list,
[
mock.call(method="POST", path=api_path, query_params={}, timeout=7.5),
mock.call(
method="POST", path=api_path, query_params={}, timeout=7.5,
), # was retried once
],
)
def test__set_future_result_wo_done(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_not_called()
def test__set_future_result_w_result_set(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"state": "DONE"}
job._result_set = True
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_not_called()
def test__set_future_result_w_done_wo_result_set_w_error(self):
from google.cloud.exceptions import NotFound
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {
"state": "DONE",
"errorResult": {"reason": "notFound", "message": "testing"},
}
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_called_once()
args, kw = set_exception.call_args
(exception,) = args
self.assertIsInstance(exception, NotFound)
self.assertEqual(exception.message, "testing")
self.assertEqual(kw, {})
set_result.assert_not_called()
def test__set_future_result_w_done_wo_result_set_wo_error(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"state": "DONE"}
set_exception = job.set_exception = mock.Mock()
set_result = job.set_result = mock.Mock()
job._set_future_result()
set_exception.assert_not_called()
set_result.assert_called_once_with(job)
def test_done_defaults_wo_state(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
reload_ = job.reload = mock.Mock()
self.assertFalse(job.done())
reload_.assert_called_once_with(retry=DEFAULT_RETRY, timeout=None)
def test_done_explicit_wo_state(self):
from google.cloud.bigquery.retry import DEFAULT_RETRY
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
reload_ = job.reload = mock.Mock()
retry = DEFAULT_RETRY.with_deadline(1)
self.assertFalse(job.done(retry=retry, timeout=7.5))
reload_.assert_called_once_with(retry=retry, timeout=7.5)
def test_done_already(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"state": "DONE"}
self.assertTrue(job.done())
@mock.patch("google.api_core.future.polling.PollingFuture.result")
def test_result_default_wo_state(self, result):
from google.cloud.bigquery.retry import DEFAULT_RETRY
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
begin = job._begin = mock.Mock()
self.assertIs(job.result(), result.return_value)
begin.assert_called_once_with(retry=DEFAULT_RETRY, timeout=None)
result.assert_called_once_with(timeout=None)
@mock.patch("google.api_core.future.polling.PollingFuture.result")
def test_result_w_retry_wo_state(self, result):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
begin = job._begin = mock.Mock()
retry = mock.Mock()
self.assertIs(job.result(retry=retry), result.return_value)
begin.assert_called_once_with(retry=retry, timeout=None)
result.assert_called_once_with(timeout=None)
@mock.patch("google.api_core.future.polling.PollingFuture.result")
def test_result_explicit_w_state(self, result):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"state": "DONE"}
begin = job._begin = mock.Mock()
timeout = 1
self.assertIs(job.result(timeout=timeout), result.return_value)
begin.assert_not_called()
result.assert_called_once_with(timeout=timeout)
@mock.patch("google.api_core.future.polling.PollingFuture.result")
def test_result_splitting_timeout_between_requests(self, result):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
begin = job._begin = mock.Mock()
retry = mock.Mock()
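# Freeze time and make the mocked _begin consume 0.3 s of the 1.0 s budget,
# so only the remaining 0.7 s should be forwarded to PollingFuture.result().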
with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen_time:
def delayed_begin(*args, **kwargs):
frozen_time.tick(delta=0.3)
begin.side_effect = delayed_begin
job.result(retry=retry, timeout=1.0)
begin.assert_called_once_with(retry=retry, timeout=1.0)
result.assert_called_once_with(timeout=0.7)
def test_cancelled_wo_error_result(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertFalse(job.cancelled())
def test_cancelled_w_error_result_not_stopped(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"errorResult": {"reason": "other"}}
self.assertFalse(job.cancelled())
def test_cancelled_w_error_result_w_stopped(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties["status"] = {"errorResult": {"reason": "stopped"}}
self.assertTrue(job.cancelled())
class Test_JobConfig(unittest.TestCase):
JOB_TYPE = "testing"
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._JobConfig
def _make_one(self, job_type=JOB_TYPE):
return self._get_target_class()(job_type)
def test_ctor(self):
job_config = self._make_one()
self.assertEqual(job_config._job_type, self.JOB_TYPE)
self.assertEqual(job_config._properties, {self.JOB_TYPE: {}})
def test_fill_from_default(self):
from google.cloud.bigquery import QueryJobConfig
job_config = QueryJobConfig()
job_config.dry_run = True
job_config.maximum_bytes_billed = 1000
default_job_config = QueryJobConfig()
default_job_config.use_query_cache = True
default_job_config.maximum_bytes_billed = 2000
final_job_config = job_config._fill_from_default(default_job_config)
self.assertTrue(final_job_config.dry_run)
self.assertTrue(final_job_config.use_query_cache)
self.assertEqual(final_job_config.maximum_bytes_billed, 1000)
def test_fill_from_default_conflict(self):
from google.cloud.bigquery import QueryJobConfig
basic_job_config = QueryJobConfig()
conflicting_job_config = self._make_one("conflicting_job_type")
self.assertNotEqual(
basic_job_config._job_type, conflicting_job_config._job_type
)
with self.assertRaises(TypeError):
basic_job_config._fill_from_default(conflicting_job_config)
@mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
def test__get_sub_prop_wo_default(self, _get_sub_prop):
job_config = self._make_one()
key = "key"
self.assertIs(job_config._get_sub_prop(key), _get_sub_prop.return_value)
_get_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], default=None
)
@mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
def test__get_sub_prop_w_default(self, _get_sub_prop):
job_config = self._make_one()
key = "key"
default = "default"
self.assertIs(
job_config._get_sub_prop(key, default=default), _get_sub_prop.return_value
)
_get_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], default=default
)
@mock.patch("google.cloud.bigquery._helpers._set_sub_prop")
def test__set_sub_prop(self, _set_sub_prop):
job_config = self._make_one()
key = "key"
value = "value"
job_config._set_sub_prop(key, value)
_set_sub_prop.assert_called_once_with(
job_config._properties, [self.JOB_TYPE, key], value
)
def test_to_api_repr(self):
job_config = self._make_one()
expected = job_config._properties = {self.JOB_TYPE: {"foo": "bar"}}
found = job_config.to_api_repr()
self.assertEqual(found, expected)
self.assertIsNot(found, expected) # copied
# 'from_api_repr' cannot be tested on '_JobConfig', because it presumes
# the ctor can be called w/o arguments
def test_labels_miss(self):
job_config = self._make_one()
self.assertEqual(job_config.labels, {})
def test_labels_update_in_place(self):
job_config = self._make_one()
labels = job_config.labels
labels["foo"] = "bar" # update in place
self.assertEqual(job_config.labels, {"foo": "bar"})
def test_labels_hit(self):
labels = {"foo": "bar"}
job_config = self._make_one()
job_config._properties["labels"] = labels
self.assertEqual(job_config.labels, labels)
def test_labels_setter_invalid(self):
labels = object()
job_config = self._make_one()
with self.assertRaises(ValueError):
job_config.labels = labels
def test_labels_setter(self):
labels = {"foo": "bar"}
job_config = self._make_one()
job_config.labels = labels
self.assertEqual(job_config._properties["labels"], labels)
class _Base(object):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import TableReference
ENDPOINT = "https://bigquery.googleapis.com"
PROJECT = "project"
SOURCE1 = "http://example.com/source1.csv"
DS_ID = "dataset_id"
DS_REF = DatasetReference(PROJECT, DS_ID)
TABLE_ID = "table_id"
TABLE_REF = TableReference(DS_REF, TABLE_ID)
JOB_ID = "JOB_ID"
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
self.ETAG = "ETAG"
self.FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
self.RESOURCE_URL = "{}/bigquery/v2/projects/{}/jobs/{}".format(
self.ENDPOINT, self.PROJECT, self.JOB_ID
)
self.USER_EMAIL = "phred@example.com"
def _table_ref(self, table_id):
from google.cloud.bigquery.table import TableReference
return TableReference(self.DS_REF, table_id)
def _make_resource(self, started=False, ended=False):
self._setUpConstants()
return _make_job_resource(
creation_time_ms=int(self.WHEN_TS * 1000),
started_time_ms=int(self.WHEN_TS * 1000),
ended_time_ms=int(self.WHEN_TS * 1000) + 1000000,
started=started,
ended=ended,
etag=self.ETAG,
endpoint=self.ENDPOINT,
job_type=self.JOB_TYPE,
job_id=self.JOB_ID,
project_id=self.PROJECT,
user_email=self.USER_EMAIL,
)
def _verifyInitialReadonlyProperties(self, job):
# root elements of resource
self.assertIsNone(job.etag)
self.assertIsNone(job.self_link)
self.assertIsNone(job.user_email)
# derived from resource['statistics']
self.assertIsNone(job.created)
self.assertIsNone(job.started)
self.assertIsNone(job.ended)
# derived from resource['status']
self.assertIsNone(job.error_result)
self.assertIsNone(job.errors)
self.assertIsNone(job.state)
def _verifyReadonlyResourceProperties(self, job, resource):
from datetime import timedelta
statistics = resource.get("statistics", {})
if "creationTime" in statistics:
self.assertEqual(job.created, self.WHEN)
else:
self.assertIsNone(job.created)
if "startTime" in statistics:
self.assertEqual(job.started, self.WHEN)
else:
self.assertIsNone(job.started)
if "endTime" in statistics:
self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
else:
self.assertIsNone(job.ended)
if "etag" in resource:
self.assertEqual(job.etag, self.ETAG)
else:
self.assertIsNone(job.etag)
if "selfLink" in resource:
self.assertEqual(job.self_link, self.RESOURCE_URL)
else:
self.assertIsNone(job.self_link)
if "user_email" in resource:
self.assertEqual(job.user_email, self.USER_EMAIL)
else:
self.assertIsNone(job.user_email)
class TestLoadJobConfig(unittest.TestCase, _Base):
JOB_TYPE = "load"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import LoadJobConfig
return LoadJobConfig
def test_ctor_w_properties(self):
config = self._get_target_class()(
allow_jagged_rows=True, allow_quoted_newlines=True
)
self.assertTrue(config.allow_jagged_rows)
self.assertTrue(config.allow_quoted_newlines)
def test_allow_jagged_rows_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.allow_jagged_rows)
def test_allow_jagged_rows_hit(self):
config = self._get_target_class()()
config._properties["load"]["allowJaggedRows"] = True
self.assertTrue(config.allow_jagged_rows)
def test_allow_jagged_rows_setter(self):
config = self._get_target_class()()
config.allow_jagged_rows = True
self.assertTrue(config._properties["load"]["allowJaggedRows"])
def test_allow_quoted_newlines_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.allow_quoted_newlines)
def test_allow_quoted_newlines_hit(self):
config = self._get_target_class()()
config._properties["load"]["allowQuotedNewlines"] = True
self.assertTrue(config.allow_quoted_newlines)
def test_allow_quoted_newlines_setter(self):
config = self._get_target_class()()
config.allow_quoted_newlines = True
self.assertTrue(config._properties["load"]["allowQuotedNewlines"])
def test_autodetect_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.autodetect)
def test_autodetect_hit(self):
config = self._get_target_class()()
config._properties["load"]["autodetect"] = True
self.assertTrue(config.autodetect)
def test_autodetect_setter(self):
config = self._get_target_class()()
config.autodetect = True
self.assertTrue(config._properties["load"]["autodetect"])
def test_clustering_fields_miss(self):
config = self._get_target_class()()
self.assertIsNone(config.clustering_fields)
def test_clustering_fields_hit(self):
config = self._get_target_class()()
fields = ["email", "postal_code"]
config._properties["load"]["clustering"] = {"fields": fields}
self.assertEqual(config.clustering_fields, fields)
def test_clustering_fields_setter(self):
fields = ["email", "postal_code"]
config = self._get_target_class()()
config.clustering_fields = fields
self.assertEqual(config._properties["load"]["clustering"], {"fields": fields})
def test_clustering_fields_setter_w_none(self):
config = self._get_target_class()()
fields = ["email", "postal_code"]
config._properties["load"]["clustering"] = {"fields": fields}
config.clustering_fields = None
self.assertIsNone(config.clustering_fields)
self.assertNotIn("clustering", config._properties["load"])
def test_create_disposition_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.create_disposition)
def test_create_disposition_hit(self):
from google.cloud.bigquery.job import CreateDisposition
disposition = CreateDisposition.CREATE_IF_NEEDED
config = self._get_target_class()()
config._properties["load"]["createDisposition"] = disposition
self.assertEqual(config.create_disposition, disposition)
def test_create_disposition_setter(self):
from google.cloud.bigquery.job import CreateDisposition
disposition = CreateDisposition.CREATE_IF_NEEDED
config = self._get_target_class()()
config.create_disposition = disposition
self.assertEqual(config._properties["load"]["createDisposition"], disposition)
def test_destination_encryption_configuration_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_encryption_configuration)
def test_destination_encryption_configuration_hit(self):
from google.cloud.bigquery.encryption_configuration import (
EncryptionConfiguration,
)
kms_key_name = "kms-key-name"
encryption_configuration = EncryptionConfiguration(kms_key_name)
config = self._get_target_class()()
config._properties["load"]["destinationEncryptionConfiguration"] = {
"kmsKeyName": kms_key_name
}
self.assertEqual(
config.destination_encryption_configuration, encryption_configuration
)
def test_destination_encryption_configuration_setter(self):
from google.cloud.bigquery.encryption_configuration import (
EncryptionConfiguration,
)
kms_key_name = "kms-key-name"
encryption_configuration = EncryptionConfiguration(kms_key_name)
config = self._get_target_class()()
config.destination_encryption_configuration = encryption_configuration
expected = {"kmsKeyName": kms_key_name}
self.assertEqual(
config._properties["load"]["destinationEncryptionConfiguration"], expected
)
def test_destination_encryption_configuration_setter_w_none(self):
kms_key_name = "kms-key-name"
config = self._get_target_class()()
config._properties["load"]["destinationEncryptionConfiguration"] = {
"kmsKeyName": kms_key_name
}
config.destination_encryption_configuration = None
self.assertIsNone(config.destination_encryption_configuration)
self.assertNotIn(
"destinationEncryptionConfiguration", config._properties["load"]
)
def test_destination_table_description_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_table_description)
def test_destination_table_description_hit(self):
description = "Description"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"description": description
}
self.assertEqual(config.destination_table_description, description)
def test_destination_table_description_setter(self):
description = "Description"
config = self._get_target_class()()
config.destination_table_description = description
expected = {"description": description}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_destination_table_description_setter_w_fn_already(self):
description = "Description"
friendly_name = "Friendly Name"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"friendlyName": friendly_name
}
config.destination_table_description = description
expected = {"friendlyName": friendly_name, "description": description}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_destination_table_description_w_none(self):
description = "Description"
friendly_name = "Friendly Name"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"description": description,
"friendlyName": friendly_name,
}
config.destination_table_description = None
expected = {"friendlyName": friendly_name}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_destination_table_friendly_name_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.destination_table_friendly_name)
def test_destination_table_friendly_name_hit(self):
friendly_name = "Friendly Name"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"friendlyName": friendly_name
}
self.assertEqual(config.destination_table_friendly_name, friendly_name)
def test_destination_table_friendly_name_setter(self):
friendly_name = "Friendly Name"
config = self._get_target_class()()
config.destination_table_friendly_name = friendly_name
expected = {"friendlyName": friendly_name}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_destination_table_friendly_name_setter_w_descr_already(self):
friendly_name = "Friendly Name"
description = "Description"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"description": description
}
config.destination_table_friendly_name = friendly_name
expected = {"friendlyName": friendly_name, "description": description}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_destination_table_friendly_name_w_none(self):
friendly_name = "Friendly Name"
description = "Description"
config = self._get_target_class()()
config._properties["load"]["destinationTableProperties"] = {
"description": description,
"friendlyName": friendly_name,
}
config.destination_table_friendly_name = None
expected = {"description": description}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
def test_encoding_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.encoding)
def test_encoding_hit(self):
from google.cloud.bigquery.job import Encoding
encoding = Encoding.UTF_8
config = self._get_target_class()()
config._properties["load"]["encoding"] = encoding
self.assertEqual(config.encoding, encoding)
def test_encoding_setter(self):
from google.cloud.bigquery.job import Encoding
encoding = Encoding.UTF_8
config = self._get_target_class()()
config.encoding = encoding
self.assertEqual(config._properties["load"]["encoding"], encoding)
def test_field_delimiter_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.field_delimiter)
def test_field_delimiter_hit(self):
field_delimiter = "|"
config = self._get_target_class()()
config._properties["load"]["fieldDelimiter"] = field_delimiter
self.assertEqual(config.field_delimiter, field_delimiter)
def test_field_delimiter_setter(self):
field_delimiter = "|"
config = self._get_target_class()()
config.field_delimiter = field_delimiter
self.assertEqual(config._properties["load"]["fieldDelimiter"], field_delimiter)
def test_hive_partitioning_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.hive_partitioning)
def test_hive_partitioning_hit(self):
from google.cloud.bigquery.external_config import HivePartitioningOptions
config = self._get_target_class()()
config._properties["load"]["hivePartitioningOptions"] = {
"sourceUriPrefix": "http://foo/bar",
"mode": "STRINGS",
}
result = config.hive_partitioning
self.assertIsInstance(result, HivePartitioningOptions)
self.assertEqual(result.source_uri_prefix, "http://foo/bar")
self.assertEqual(result.mode, "STRINGS")
def test_hive_partitioning_setter(self):
from google.cloud.bigquery.external_config import HivePartitioningOptions
hive_partitioning = HivePartitioningOptions()
hive_partitioning.source_uri_prefix = "http://foo/bar"
hive_partitioning.mode = "AUTO"
config = self._get_target_class()()
config.hive_partitioning = hive_partitioning
self.assertEqual(
config._properties["load"]["hivePartitioningOptions"],
{"sourceUriPrefix": "http://foo/bar", "mode": "AUTO"},
)
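# Clearing the option stores an explicit None rather than removing the key.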
config.hive_partitioning = None
self.assertIsNone(config._properties["load"]["hivePartitioningOptions"])
def test_hive_partitioning_invalid_type(self):
config = self._get_target_class()()
with self.assertRaises(TypeError):
config.hive_partitioning = {"mode": "AUTO"}
def test_ignore_unknown_values_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.ignore_unknown_values)
def test_ignore_unknown_values_hit(self):
config = self._get_target_class()()
config._properties["load"]["ignoreUnknownValues"] = True
self.assertTrue(config.ignore_unknown_values)
def test_ignore_unknown_values_setter(self):
config = self._get_target_class()()
config.ignore_unknown_values = True
self.assertTrue(config._properties["load"]["ignoreUnknownValues"])
def test_max_bad_records_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.max_bad_records)
def test_max_bad_records_hit(self):
max_bad_records = 13
config = self._get_target_class()()
config._properties["load"]["maxBadRecords"] = max_bad_records
self.assertEqual(config.max_bad_records, max_bad_records)
def test_max_bad_records_setter(self):
max_bad_records = 13
config = self._get_target_class()()
config.max_bad_records = max_bad_records
self.assertEqual(config._properties["load"]["maxBadRecords"], max_bad_records)
def test_null_marker_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.null_marker)
def test_null_marker_hit(self):
null_marker = "XXX"
config = self._get_target_class()()
config._properties["load"]["nullMarker"] = null_marker
self.assertEqual(config.null_marker, null_marker)
def test_null_marker_setter(self):
null_marker = "XXX"
config = self._get_target_class()()
config.null_marker = null_marker
self.assertEqual(config._properties["load"]["nullMarker"], null_marker)
def test_quote_character_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.quote_character)
def test_quote_character_hit(self):
quote_character = "'"
config = self._get_target_class()()
config._properties["load"]["quote"] = quote_character
self.assertEqual(config.quote_character, quote_character)
def test_quote_character_setter(self):
quote_character = "'"
config = self._get_target_class()()
config.quote_character = quote_character
self.assertEqual(config._properties["load"]["quote"], quote_character)
def test_schema_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.schema)
def test_schema_hit(self):
from google.cloud.bigquery.schema import SchemaField
config = self._get_target_class()()
all_props_repr = {
"mode": "REQUIRED",
"name": "foo",
"type": "INTEGER",
"description": "Foo",
}
minimal_repr = {"name": "bar", "type": "STRING"}
config._properties["load"]["schema"] = {
"fields": [all_props_repr, minimal_repr]
}
all_props, minimal = config.schema
self.assertEqual(all_props, SchemaField.from_api_repr(all_props_repr))
self.assertEqual(minimal, SchemaField.from_api_repr(minimal_repr))
def test_schema_setter_fields(self):
from google.cloud.bigquery.schema import SchemaField
config = self._get_target_class()()
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
config.schema = [full_name, age]
full_name_repr = {
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
}
age_repr = {
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
}
self.assertEqual(
config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]}
)
def test_schema_setter_valid_mappings_list(self):
config = self._get_target_class()()
schema = [
{"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
]
config.schema = schema
full_name_repr = {
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
}
age_repr = {
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
}
self.assertEqual(
config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]}
)
def test_schema_setter_invalid_mappings_list(self):
config = self._get_target_class()()
schema = [
{"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "age", "typeoo": "INTEGER", "mode": "REQUIRED"},
]
with self.assertRaises(Exception):
config.schema = schema
def test_schema_setter_unsetting_schema(self):
from google.cloud.bigquery.schema import SchemaField
config = self._get_target_class()()
config._properties["load"]["schema"] = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
config.schema = None
self.assertNotIn("schema", config._properties["load"])
config.schema = None # no error, idempotent operation
def test_schema_update_options_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.schema_update_options)
def test_schema_update_options_hit(self):
from google.cloud.bigquery.job import SchemaUpdateOption
options = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
config = self._get_target_class()()
config._properties["load"]["schemaUpdateOptions"] = options
self.assertEqual(config.schema_update_options, options)
def test_schema_update_options_setter(self):
from google.cloud.bigquery.job import SchemaUpdateOption
options = [
SchemaUpdateOption.ALLOW_FIELD_ADDITION,
SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
config = self._get_target_class()()
config.schema_update_options = options
self.assertEqual(config._properties["load"]["schemaUpdateOptions"], options)
def test_skip_leading_rows_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.skip_leading_rows)
def test_skip_leading_rows_hit_w_str(self):
skip_leading_rows = 1
config = self._get_target_class()()
config._properties["load"]["skipLeadingRows"] = str(skip_leading_rows)
self.assertEqual(config.skip_leading_rows, skip_leading_rows)
def test_skip_leading_rows_hit_w_integer(self):
skip_leading_rows = 1
config = self._get_target_class()()
config._properties["load"]["skipLeadingRows"] = skip_leading_rows
self.assertEqual(config.skip_leading_rows, skip_leading_rows)
def test_skip_leading_rows_setter(self):
skip_leading_rows = 1
config = self._get_target_class()()
config.skip_leading_rows = skip_leading_rows
self.assertEqual(
config._properties["load"]["skipLeadingRows"], str(skip_leading_rows)
)
def test_source_format_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.source_format)
def test_source_format_hit(self):
from google.cloud.bigquery.job import SourceFormat
source_format = SourceFormat.CSV
config = self._get_target_class()()
config._properties["load"]["sourceFormat"] = source_format
self.assertEqual(config.source_format, source_format)
def test_source_format_setter(self):
from google.cloud.bigquery.job import SourceFormat
source_format = SourceFormat.CSV
config = self._get_target_class()()
config.source_format = source_format
self.assertEqual(config._properties["load"]["sourceFormat"], source_format)
def test_range_partitioning_w_none(self):
object_under_test = self._get_target_class()()
assert object_under_test.range_partitioning is None
def test_range_partitioning_w_value(self):
object_under_test = self._get_target_class()()
object_under_test._properties["load"]["rangePartitioning"] = {
"field": "column_one",
"range": {"start": 1, "end": 1000, "interval": 10},
}
assert object_under_test.range_partitioning.field == "column_one"
assert object_under_test.range_partitioning.range_.start == 1
assert object_under_test.range_partitioning.range_.end == 1000
assert object_under_test.range_partitioning.range_.interval == 10
def test_range_partitioning_setter(self):
from google.cloud.bigquery.table import PartitionRange
from google.cloud.bigquery.table import RangePartitioning
object_under_test = self._get_target_class()()
object_under_test.range_partitioning = RangePartitioning(
field="column_one", range_=PartitionRange(start=1, end=1000, interval=10)
)
assert object_under_test.range_partitioning.field == "column_one"
assert object_under_test.range_partitioning.range_.start == 1
assert object_under_test.range_partitioning.range_.end == 1000
assert object_under_test.range_partitioning.range_.interval == 10
def test_range_partitioning_setter_w_none(self):
object_under_test = self._get_target_class()()
object_under_test.range_partitioning = None
assert object_under_test.range_partitioning is None
def test_range_partitioning_setter_w_wrong_type(self):
object_under_test = self._get_target_class()()
with pytest.raises(ValueError, match="RangePartitioning"):
object_under_test.range_partitioning = object()
def test_time_partitioning_miss(self):
config = self._get_target_class()()
self.assertIsNone(config.time_partitioning)
def test_time_partitioning_hit(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
field = "creation_date"
year_ms = 86400 * 1000 * 365
config = self._get_target_class()()
config._properties["load"]["timePartitioning"] = {
"type": TimePartitioningType.DAY,
"field": field,
"expirationMs": str(year_ms),
"requirePartitionFilter": False,
}
expected = TimePartitioning(
type_=TimePartitioningType.DAY,
field=field,
expiration_ms=year_ms,
require_partition_filter=False,
)
self.assertEqual(config.time_partitioning, expected)
def test_time_partitioning_setter(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
field = "creation_date"
year_ms = 86400 * 1000 * 365
time_partitioning = TimePartitioning(
type_=TimePartitioningType.DAY,
field=field,
expiration_ms=year_ms,
require_partition_filter=False,
)
config = self._get_target_class()()
config.time_partitioning = time_partitioning
expected = {
"type": TimePartitioningType.DAY,
"field": field,
"expirationMs": str(year_ms),
"requirePartitionFilter": False,
}
self.assertEqual(config._properties["load"]["timePartitioning"], expected)
def test_time_partitioning_setter_w_none(self):
from google.cloud.bigquery.table import TimePartitioningType
field = "creation_date"
year_ms = 86400 * 1000 * 365
config = self._get_target_class()()
config._properties["load"]["timePartitioning"] = {
"type": TimePartitioningType.DAY,
"field": field,
"expirationMs": str(year_ms),
"requirePartitionFilter": False,
}
config.time_partitioning = None
self.assertIsNone(config.time_partitioning)
self.assertNotIn("timePartitioning", config._properties["load"])
def test_use_avro_logical_types(self):
config = self._get_target_class()()
self.assertIsNone(config.use_avro_logical_types)
def test_use_avro_logical_types_setter(self):
config = self._get_target_class()()
config.use_avro_logical_types = True
self.assertTrue(config._properties["load"]["useAvroLogicalTypes"])
def test_write_disposition_missing(self):
config = self._get_target_class()()
self.assertIsNone(config.write_disposition)
def test_write_disposition_hit(self):
from google.cloud.bigquery.job import WriteDisposition
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()()
config._properties["load"]["writeDisposition"] = write_disposition
self.assertEqual(config.write_disposition, write_disposition)
def test_write_disposition_setter(self):
from google.cloud.bigquery.job import WriteDisposition
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()()
config.write_disposition = write_disposition
self.assertEqual(
config._properties["load"]["writeDisposition"], write_disposition
)
class TestLoadJob(unittest.TestCase, _Base):
JOB_TYPE = "load"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import LoadJob
return LoadJob
def _setUpConstants(self):
super(TestLoadJob, self)._setUpConstants()
self.INPUT_FILES = 2
self.INPUT_BYTES = 12345
self.OUTPUT_BYTES = 23456
self.OUTPUT_ROWS = 345
def _make_resource(self, started=False, ended=False):
resource = super(TestLoadJob, self)._make_resource(started, ended)
config = resource["configuration"]["load"]
config["sourceUris"] = [self.SOURCE1]
config["destinationTable"] = {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
}
if ended:
resource["status"] = {"state": "DONE"}
resource["statistics"]["load"]["inputFiles"] = self.INPUT_FILES
resource["statistics"]["load"]["inputFileBytes"] = self.INPUT_BYTES
resource["statistics"]["load"]["outputBytes"] = self.OUTPUT_BYTES
resource["statistics"]["load"]["outputRows"] = self.OUTPUT_ROWS
return resource
def _verifyBooleanConfigProperties(self, job, config):
if "allowJaggedRows" in config:
self.assertEqual(job.allow_jagged_rows, config["allowJaggedRows"])
else:
self.assertIsNone(job.allow_jagged_rows)
if "allowQuotedNewlines" in config:
self.assertEqual(job.allow_quoted_newlines, config["allowQuotedNewlines"])
else:
self.assertIsNone(job.allow_quoted_newlines)
if "autodetect" in config:
self.assertEqual(job.autodetect, config["autodetect"])
else:
self.assertIsNone(job.autodetect)
if "ignoreUnknownValues" in config:
self.assertEqual(job.ignore_unknown_values, config["ignoreUnknownValues"])
else:
self.assertIsNone(job.ignore_unknown_values)
if "useAvroLogicalTypes" in config:
self.assertEqual(job.use_avro_logical_types, config["useAvroLogicalTypes"])
else:
self.assertIsNone(job.use_avro_logical_types)
def _verifyEnumConfigProperties(self, job, config):
if "createDisposition" in config:
self.assertEqual(job.create_disposition, config["createDisposition"])
else:
self.assertIsNone(job.create_disposition)
if "encoding" in config:
self.assertEqual(job.encoding, config["encoding"])
else:
self.assertIsNone(job.encoding)
if "sourceFormat" in config:
self.assertEqual(job.source_format, config["sourceFormat"])
else:
self.assertIsNone(job.source_format)
if "writeDisposition" in config:
self.assertEqual(job.write_disposition, config["writeDisposition"])
else:
self.assertIsNone(job.write_disposition)
if "schemaUpdateOptions" in config:
self.assertEqual(job.schema_update_options, config["schemaUpdateOptions"])
else:
self.assertIsNone(job.schema_update_options)
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get("configuration", {}).get("load")
self._verifyBooleanConfigProperties(job, config)
self._verifyEnumConfigProperties(job, config)
self.assertEqual(job.source_uris, config["sourceUris"])
table_ref = config["destinationTable"]
self.assertEqual(job.destination.project, table_ref["projectId"])
self.assertEqual(job.destination.dataset_id, table_ref["datasetId"])
self.assertEqual(job.destination.table_id, table_ref["tableId"])
if "fieldDelimiter" in config:
self.assertEqual(job.field_delimiter, config["fieldDelimiter"])
else:
self.assertIsNone(job.field_delimiter)
if "maxBadRecords" in config:
self.assertEqual(job.max_bad_records, config["maxBadRecords"])
else:
self.assertIsNone(job.max_bad_records)
if "nullMarker" in config:
self.assertEqual(job.null_marker, config["nullMarker"])
else:
self.assertIsNone(job.null_marker)
if "quote" in config:
self.assertEqual(job.quote_character, config["quote"])
else:
self.assertIsNone(job.quote_character)
if "skipLeadingRows" in config:
self.assertEqual(str(job.skip_leading_rows), config["skipLeadingRows"])
else:
self.assertIsNone(job.skip_leading_rows)
if "destinationEncryptionConfiguration" in config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
config["destinationEncryptionConfiguration"]["kmsKeyName"],
)
else:
self.assertIsNone(job.destination_encryption_configuration)
def test_ctor(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
self.assertIs(job.destination, self.TABLE_REF)
self.assertEqual(list(job.source_uris), [self.SOURCE1])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# derived from resource['statistics']['load']
self.assertIsNone(job.input_file_bytes)
self.assertIsNone(job.input_files)
self.assertIsNone(job.output_bytes)
self.assertIsNone(job.output_rows)
# set/read from resource['configuration']['load']
self.assertIsNone(job.schema)
self.assertIsNone(job.allow_jagged_rows)
self.assertIsNone(job.allow_quoted_newlines)
self.assertIsNone(job.autodetect)
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.encoding)
self.assertIsNone(job.field_delimiter)
self.assertIsNone(job.ignore_unknown_values)
self.assertIsNone(job.max_bad_records)
self.assertIsNone(job.null_marker)
self.assertIsNone(job.quote_character)
self.assertIsNone(job.skip_leading_rows)
self.assertIsNone(job.source_format)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.destination_encryption_configuration)
self.assertIsNone(job.destination_table_description)
self.assertIsNone(job.destination_table_friendly_name)
self.assertIsNone(job.range_partitioning)
self.assertIsNone(job.time_partitioning)
self.assertIsNone(job.use_avro_logical_types)
self.assertIsNone(job.clustering_fields)
self.assertIsNone(job.schema_update_options)
def test_ctor_w_config(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.job import LoadJobConfig
client = _make_client(project=self.PROJECT)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
config = LoadJobConfig()
config.schema = [full_name, age]
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config
)
self.assertEqual(job.schema, [full_name, age])
config.destination_table_description = "Description"
expected = {"description": "Description"}
self.assertEqual(
config._properties["load"]["destinationTableProperties"], expected
)
friendly_name = "Friendly Name"
config._properties["load"]["destinationTableProperties"] = {
"friendlyName": friendly_name
}
self.assertEqual(config.destination_table_friendly_name, friendly_name)
def test_ctor_w_job_reference(self):
from google.cloud.bigquery import job
client = _make_client(project=self.PROJECT)
job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
self.assertEqual(load_job.project, "alternative-project")
self.assertEqual(load_job.location, "US")
def test_done(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
self.assertTrue(job.done())
def test_result(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertIs(result, job)
def test_result_invokes_begin(self):
begun_resource = self._make_resource()
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(begun_resource, done_resource)
client = _make_client(self.PROJECT)
client._connection = connection
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job.result()
self.assertEqual(len(connection.api_request.call_args_list), 2)
begin_request, reload_request = connection.api_request.call_args_list
self.assertEqual(begin_request[1]["method"], "POST")
self.assertEqual(reload_request[1]["method"], "GET")
def test_schema_setter_non_list(self):
from google.cloud.bigquery.job import LoadJobConfig
config = LoadJobConfig()
with self.assertRaises(TypeError):
config.schema = object()
def test_schema_setter_invalid_field(self):
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.schema import SchemaField
config = LoadJobConfig()
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
with self.assertRaises(ValueError):
config.schema = [full_name, object()]
def test_schema_setter(self):
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.schema import SchemaField
config = LoadJobConfig()
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
config.schema = [full_name, age]
self.assertEqual(config.schema, [full_name, age])
def test_props_set_by_server(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _millis
CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC)
STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC)
ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC)
FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
URL = "http://example.com/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
EMAIL = "phred@example.com"
ERROR_RESULT = {
"debugInfo": "DEBUG",
"location": "LOCATION",
"message": "MESSAGE",
"reason": "REASON",
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job._properties["etag"] = "ETAG"
job._properties["id"] = FULL_JOB_ID
job._properties["selfLink"] = URL
job._properties["user_email"] = EMAIL
statistics = job._properties["statistics"] = {}
statistics["creationTime"] = _millis(CREATED)
statistics["startTime"] = _millis(STARTED)
statistics["endTime"] = _millis(ENDED)
self.assertEqual(job.etag, "ETAG")
self.assertEqual(job.self_link, URL)
self.assertEqual(job.user_email, EMAIL)
self.assertEqual(job.created, CREATED)
self.assertEqual(job.started, STARTED)
self.assertEqual(job.ended, ENDED)
# running jobs have no load statistics set yet.
self.assertIsNone(job.output_bytes)
load_stats = statistics["load"] = {}
load_stats["inputFileBytes"] = 12345
load_stats["inputFiles"] = 1
load_stats["outputBytes"] = 23456
load_stats["outputRows"] = 345
self.assertEqual(job.input_file_bytes, 12345)
self.assertEqual(job.input_files, 1)
self.assertEqual(job.output_bytes, 23456)
self.assertEqual(job.output_rows, 345)
status = job._properties["status"] = {}
self.assertIsNone(job.error_result)
self.assertIsNone(job.errors)
self.assertIsNone(job.state)
status["errorResult"] = ERROR_RESULT
status["errors"] = [ERROR_RESULT]
status["state"] = "STATE"
self.assertEqual(job.error_result, ERROR_RESULT)
self.assertEqual(job.errors, [ERROR_RESULT])
self.assertEqual(job.state, "STATE")
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": "%s:%s" % (self.PROJECT, self.JOB_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.FULL_JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"load": {
"sourceUris": [self.SOURCE1],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.FULL_JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"load": {
"sourceUris": [self.SOURCE1],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"destinationEncryptionConfiguration": {
"kmsKeyName": self.KMS_KEY_NAME
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
load_config = RESOURCE["configuration"]["load"]
load_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_already_running(self):
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job._properties["status"] = {"state": "RUNNING"}
with self.assertRaises(ValueError):
job._begin()
def test_begin_w_bound_client(self):
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job._begin()
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/{}/jobs".format(self.PROJECT),
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"load": {
"sourceUris": [self.SOURCE1],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_autodetect(self):
from google.cloud.bigquery.job import LoadJobConfig
path = "/projects/{}/jobs".format(self.PROJECT)
resource = self._make_resource()
resource["configuration"]["load"]["autodetect"] = True
# Ensure None for missing server-set props
del resource["statistics"]["creationTime"]
del resource["etag"]
del resource["selfLink"]
del resource["user_email"]
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
config = LoadJobConfig()
config.autodetect = True
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config
)
job._begin()
sent = {
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"load": {
"sourceUris": [self.SOURCE1],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"autodetect": True,
}
},
}
conn.api_request.assert_called_once_with(
method="POST", path=path, data=sent, timeout=None
)
self._verifyResourceProperties(job, resource)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
from google.cloud.bigquery.schema import SchemaField
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
LOAD_CONFIGURATION = {
"sourceUris": [self.SOURCE1],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"allowJaggedRows": True,
"allowQuotedNewlines": True,
"createDisposition": CreateDisposition.CREATE_NEVER,
"encoding": "ISO-8559-1",
"fieldDelimiter": "|",
"ignoreUnknownValues": True,
"maxBadRecords": 100,
"nullMarker": r"\N",
"quote": "'",
"skipLeadingRows": "1",
"sourceFormat": "CSV",
"useAvroLogicalTypes": True,
"writeDisposition": WriteDisposition.WRITE_TRUNCATE,
"schema": {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
},
"schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_ADDITION],
}
RESOURCE["configuration"]["load"] = LOAD_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
config = LoadJobConfig()
config.schema = [full_name, age]
job = self._make_one(
self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1, config
)
config.allow_jagged_rows = True
config.allow_quoted_newlines = True
config.create_disposition = CreateDisposition.CREATE_NEVER
config.encoding = "ISO-8859-1"
config.field_delimiter = "|"
config.ignore_unknown_values = True
config.max_bad_records = 100
config.null_marker = r"\N"
config.quote_character = "'"
config.skip_leading_rows = 1
config.source_format = "CSV"
config.use_avro_logical_types = True
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_ADDITION]
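# Attributes set on the config after the job is constructed should still
# appear in the request payload asserted below.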
job._begin(client=client2)
conn1.api_request.assert_not_called()
self.assertEqual(len(conn2.api_request.call_args_list), 1)
req = conn2.api_request.call_args_list[0]
self.assertEqual(req[1]["method"], "POST")
self.assertEqual(req[1]["path"], PATH)
SENT = {
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {"load": LOAD_CONFIGURATION},
}
self.maxDiff = None
self.assertEqual(req[1]["data"], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource()
resource["jobReference"]["projectId"] = "alternative-project"
resource["jobReference"]["location"] = "US"
job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job._begin()
conn.api_request.assert_called_once()
_, request = conn.api_request.call_args
self.assertEqual(request["method"], "POST")
self.assertEqual(request["path"], "/projects/alternative-project/jobs")
self.assertEqual(
request["data"]["jobReference"]["projectId"], "alternative-project"
)
self.assertEqual(request["data"]["jobReference"]["location"], "US")
self.assertEqual(request["data"]["jobReference"]["jobId"], self.JOB_ID)
def test_exists_miss_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_exists_hit_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_exists_miss_w_job_reference(self):
from google.cloud.bigquery import job
job_ref = job._JobReference("my-job-id", "other-project", "US")
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
self.assertFalse(load_job.exists())
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/other-project/jobs/my-job-id",
query_params={"fields": "id", "location": "US"},
timeout=None,
)
def test_reload_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job.reload()
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource(ended=True)
resource["jobReference"]["projectId"] = "alternative-project"
resource["jobReference"]["location"] = "US"
job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
conn = _make_connection(resource)
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job.reload()
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/alternative-project/jobs/{}".format(self.JOB_ID),
query_params={"location": "US"},
timeout=None,
)
def test_cancel_w_bound_client(self):
PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource(ended=True)
RESPONSE = {"job": RESOURCE}
conn = _make_connection(RESPONSE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client)
job.cancel()
conn.api_request.assert_called_once_with(
method="POST", path=PATH, query_params={}, timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource(ended=True)
RESPONSE = {"job": RESOURCE}
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESPONSE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1)
job.cancel(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="POST", path=PATH, query_params={}, timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_job_reference(self):
from google.cloud.bigquery import job
resource = self._make_resource(ended=True)
resource["jobReference"]["projectId"] = "alternative-project"
resource["jobReference"]["location"] = "US"
job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US")
conn = _make_connection({"job": resource})
client = _make_client(project=self.PROJECT, connection=conn)
load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client)
load_job.cancel()
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/alternative-project/jobs/{}/cancel".format(self.JOB_ID),
query_params={"location": "US"},
timeout=None,
)
class TestCopyJobConfig(unittest.TestCase, _Base):
JOB_TYPE = "copy"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import CopyJobConfig
return CopyJobConfig
def test_ctor_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import WriteDisposition
create_disposition = CreateDisposition.CREATE_NEVER
write_disposition = WriteDisposition.WRITE_TRUNCATE
config = self._get_target_class()(
create_disposition=create_disposition, write_disposition=write_disposition
)
self.assertEqual(config.create_disposition, create_disposition)
self.assertEqual(config.write_disposition, write_disposition)
def test_to_api_repr_with_encryption(self):
from google.cloud.bigquery.encryption_configuration import (
EncryptionConfiguration,
)
config = self._make_one()
config.destination_encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME
)
resource = config.to_api_repr()
self.assertEqual(
resource,
{
"copy": {
"destinationEncryptionConfiguration": {
"kmsKeyName": self.KMS_KEY_NAME
}
}
},
)
def test_to_api_repr_with_encryption_none(self):
config = self._make_one()
config.destination_encryption_configuration = None
resource = config.to_api_repr()
self.assertEqual(
resource, {"copy": {"destinationEncryptionConfiguration": None}}
)
class TestCopyJob(unittest.TestCase, _Base):
JOB_TYPE = "copy"
SOURCE_TABLE = "source_table"
DESTINATION_TABLE = "destination_table"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import CopyJob
return CopyJob
def _make_resource(self, started=False, ended=False):
resource = super(TestCopyJob, self)._make_resource(started, ended)
config = resource["configuration"]["copy"]
config["sourceTables"] = [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
]
config["destinationTable"] = {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
}
return resource
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get("configuration", {}).get("copy")
table_ref = config["destinationTable"]
self.assertEqual(job.destination.project, table_ref["projectId"])
self.assertEqual(job.destination.dataset_id, table_ref["datasetId"])
self.assertEqual(job.destination.table_id, table_ref["tableId"])
sources = config.get("sourceTables")
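# A copy configuration may specify a single 'sourceTable' instead of 'sourceTables'.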
if sources is None:
sources = [config["sourceTable"]]
self.assertEqual(len(sources), len(job.sources))
for table_ref, table in zip(sources, job.sources):
self.assertEqual(table.project, table_ref["projectId"])
self.assertEqual(table.dataset_id, table_ref["datasetId"])
self.assertEqual(table.table_id, table_ref["tableId"])
if "createDisposition" in config:
self.assertEqual(job.create_disposition, config["createDisposition"])
else:
self.assertIsNone(job.create_disposition)
if "writeDisposition" in config:
self.assertEqual(job.write_disposition, config["writeDisposition"])
else:
self.assertIsNone(job.write_disposition)
if "destinationEncryptionConfiguration" in config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
config["destinationEncryptionConfiguration"]["kmsKeyName"],
)
else:
self.assertIsNone(job.destination_encryption_configuration)
def test_ctor(self):
client = _make_client(project=self.PROJECT)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
self.assertIs(job.destination, destination)
self.assertEqual(job.sources, [source])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['copy']
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.destination_encryption_configuration)
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
},
"destinationEncryptionConfiguration": {
"kmsKeyName": self.KMS_KEY_NAME
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_sourcetable(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"copy": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
},
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_wo_sources(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"copy": {
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
}
}
},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
copy_config = RESOURCE["configuration"]["copy"]
copy_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_bound_client(self):
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
job._begin()
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
},
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.job import CopyJobConfig
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import WriteDisposition
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
COPY_CONFIGURATION = {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
},
"createDisposition": CreateDisposition.CREATE_NEVER,
"writeDisposition": WriteDisposition.WRITE_TRUNCATE,
}
RESOURCE["configuration"]["copy"] = COPY_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
config = CopyJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = self._make_one(self.JOB_ID, [source], destination, client1, config)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {"copy": COPY_CONFIGURATION},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None,
)
def test_exists_hit_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_reload_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client)
job.reload()
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source = self._table_ref(self.SOURCE_TABLE)
destination = self._table_ref(self.DESTINATION_TABLE)
job = self._make_one(self.JOB_ID, [source], destination, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
class TestExtractJobConfig(unittest.TestCase, _Base):
JOB_TYPE = "extract"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import ExtractJobConfig
return ExtractJobConfig
def test_ctor_w_properties(self):
config = self._get_target_class()(field_delimiter="\t", print_header=True)
self.assertEqual(config.field_delimiter, "\t")
self.assertTrue(config.print_header)
def test_to_api_repr(self):
from google.cloud.bigquery import job
config = self._make_one()
config.compression = job.Compression.SNAPPY
config.destination_format = job.DestinationFormat.AVRO
config.field_delimiter = "ignored for avro"
config.print_header = False
config._properties["extract"]["someNewField"] = "some-value"
config.use_avro_logical_types = True
resource = config.to_api_repr()
self.assertEqual(
resource,
{
"extract": {
"compression": "SNAPPY",
"destinationFormat": "AVRO",
"fieldDelimiter": "ignored for avro",
"printHeader": False,
"someNewField": "some-value",
"useAvroLogicalTypes": True,
}
},
)
def test_from_api_repr(self):
cls = self._get_target_class()
config = cls.from_api_repr(
{
"extract": {
"compression": "NONE",
"destinationFormat": "CSV",
"fieldDelimiter": "\t",
"printHeader": True,
"someNewField": "some-value",
"useAvroLogicalTypes": False,
}
}
)
self.assertEqual(config.compression, "NONE")
self.assertEqual(config.destination_format, "CSV")
self.assertEqual(config.field_delimiter, "\t")
self.assertEqual(config.print_header, True)
self.assertEqual(config._properties["extract"]["someNewField"], "some-value")
self.assertEqual(config.use_avro_logical_types, False)
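# Illustrative sketch (not part of the test suite): how an ExtractJobConfig
# like the ones above is typically passed to Client.extract_table. The table
# and bucket names are hypothetical placeholders.
def _sketch_extract_job_config_usage():
    from google.cloud import bigquery
    from google.cloud.bigquery.job import Compression, ExtractJobConfig

    client = bigquery.Client(project="example-project")
    config = ExtractJobConfig(compression=Compression.GZIP, print_header=False)
    extract_job = client.extract_table(
        "example-project.example_dataset.source_table",
        "gs://example-bucket/export-*.csv",
        job_config=config,
    )
    # result() blocks until the extract job completes (or raises on error).
    return extract_job.result()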
class TestExtractJob(unittest.TestCase, _Base):
JOB_TYPE = "extract"
SOURCE_TABLE = "source_table"
DESTINATION_URI = "gs://bucket_name/object_name"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import ExtractJob
return ExtractJob
def _make_resource(self, started=False, ended=False):
resource = super(TestExtractJob, self)._make_resource(started, ended)
config = resource["configuration"]["extract"]
config["sourceTable"] = {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
}
config["destinationUris"] = [self.DESTINATION_URI]
return resource
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get("configuration", {}).get("extract")
self.assertEqual(job.destination_uris, config["destinationUris"])
table_ref = config["sourceTable"]
self.assertEqual(job.source.project, table_ref["projectId"])
self.assertEqual(job.source.dataset_id, table_ref["datasetId"])
self.assertEqual(job.source.table_id, table_ref["tableId"])
if "compression" in config:
self.assertEqual(job.compression, config["compression"])
else:
self.assertIsNone(job.compression)
if "destinationFormat" in config:
self.assertEqual(job.destination_format, config["destinationFormat"])
else:
self.assertIsNone(job.destination_format)
if "fieldDelimiter" in config:
self.assertEqual(job.field_delimiter, config["fieldDelimiter"])
else:
self.assertIsNone(job.field_delimiter)
if "printHeader" in config:
self.assertEqual(job.print_header, config["printHeader"])
else:
self.assertIsNone(job.print_header)
def test_ctor(self):
from google.cloud.bigquery.table import Table
client = _make_client(project=self.PROJECT)
source = Table(self.TABLE_REF)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
self.assertEqual(job.source.project, self.PROJECT)
self.assertEqual(job.source.dataset_id, self.DS_ID)
self.assertEqual(job.source.table_id, self.TABLE_ID)
self.assertEqual(job.destination_uris, [self.DESTINATION_URI])
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['extract']
self.assertIsNone(job.compression)
self.assertIsNone(job.destination_format)
self.assertIsNone(job.field_delimiter)
self.assertIsNone(job.print_header)
def test_destination_uri_file_counts(self):
file_counts = 23
client = _make_client(project=self.PROJECT)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client
)
self.assertIsNone(job.destination_uri_file_counts)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.destination_uri_file_counts)
extract_stats = statistics["extract"] = {}
self.assertIsNone(job.destination_uri_file_counts)
extract_stats["destinationUriFileCounts"] = [str(file_counts)]
self.assertEqual(job.destination_uri_file_counts, [file_counts])
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
},
"destinationUris": [self.DESTINATION_URI],
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import Compression
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
extract_config = RESOURCE["configuration"]["extract"]
extract_config["compression"] = Compression.GZIP
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
job._begin()
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
},
"destinationUris": [self.DESTINATION_URI],
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import Compression
from google.cloud.bigquery.job import DestinationFormat
from google.cloud.bigquery.job import ExtractJobConfig
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource(ended=True)
EXTRACT_CONFIGURATION = {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.SOURCE_TABLE,
},
"destinationUris": [self.DESTINATION_URI],
"compression": Compression.GZIP,
"destinationFormat": DestinationFormat.NEWLINE_DELIMITED_JSON,
"fieldDelimiter": "|",
"printHeader": False,
}
RESOURCE["configuration"]["extract"] = EXTRACT_CONFIGURATION
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
config = ExtractJobConfig()
config.compression = Compression.GZIP
config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
config.field_delimiter = "|"
config.print_header = False
job = self._make_one(
self.JOB_ID, source, [self.DESTINATION_URI], client1, config
)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {"extract": EXTRACT_CONFIGURATION},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client
)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None,
)
def test_exists_hit_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(
self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client1
)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_reload_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client)
job.reload()
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
RESOURCE = self._make_resource()
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
source_dataset = DatasetReference(self.PROJECT, self.DS_ID)
source = source_dataset.table(self.SOURCE_TABLE)
job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
class TestQueryJobConfig(unittest.TestCase, _Base):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryJobConfig
return QueryJobConfig
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
config = self._make_one()
self.assertEqual(config._properties, {"query": {}})
def test_ctor_w_none(self):
config = self._make_one()
config.default_dataset = None
config.destination = None
self.assertIsNone(config.default_dataset)
self.assertIsNone(config.destination)
def test_ctor_w_properties(self):
config = self._get_target_class()(use_query_cache=False, use_legacy_sql=True)
self.assertFalse(config.use_query_cache)
self.assertTrue(config.use_legacy_sql)
def test_ctor_w_string_default_dataset(self):
from google.cloud.bigquery import dataset
default_dataset = "default-proj.default_dset"
config = self._get_target_class()(default_dataset=default_dataset)
expected = dataset.DatasetReference.from_string(default_dataset)
self.assertEqual(config.default_dataset, expected)
    def test_ctor_w_string_destination(self):
from google.cloud.bigquery import table
destination = "dest-proj.dest_dset.dest_tbl"
config = self._get_target_class()(destination=destination)
expected = table.TableReference.from_string(destination)
self.assertEqual(config.destination, expected)
def test_default_dataset_w_string(self):
from google.cloud.bigquery import dataset
default_dataset = "default-proj.default_dset"
config = self._make_one()
config.default_dataset = default_dataset
expected = dataset.DatasetReference.from_string(default_dataset)
self.assertEqual(config.default_dataset, expected)
def test_default_dataset_w_dataset(self):
from google.cloud.bigquery import dataset
default_dataset = "default-proj.default_dset"
expected = dataset.DatasetReference.from_string(default_dataset)
config = self._make_one()
config.default_dataset = dataset.Dataset(expected)
self.assertEqual(config.default_dataset, expected)
    def test_destination_w_string(self):
from google.cloud.bigquery import table
destination = "dest-proj.dest_dset.dest_tbl"
config = self._make_one()
config.destination = destination
expected = table.TableReference.from_string(destination)
self.assertEqual(config.destination, expected)
def test_range_partitioning_w_none(self):
object_under_test = self._get_target_class()()
assert object_under_test.range_partitioning is None
def test_range_partitioning_w_value(self):
object_under_test = self._get_target_class()()
object_under_test._properties["query"]["rangePartitioning"] = {
"field": "column_one",
"range": {"start": 1, "end": 1000, "interval": 10},
}
        assert object_under_test.range_partitioning.field == "column_one"
        assert object_under_test.range_partitioning.range_.start == 1
        assert object_under_test.range_partitioning.range_.end == 1000
        assert object_under_test.range_partitioning.range_.interval == 10
def test_range_partitioning_setter(self):
from google.cloud.bigquery.table import PartitionRange
from google.cloud.bigquery.table import RangePartitioning
object_under_test = self._get_target_class()()
object_under_test.range_partitioning = RangePartitioning(
field="column_one", range_=PartitionRange(start=1, end=1000, interval=10)
)
        assert object_under_test.range_partitioning.field == "column_one"
        assert object_under_test.range_partitioning.range_.start == 1
        assert object_under_test.range_partitioning.range_.end == 1000
        assert object_under_test.range_partitioning.range_.interval == 10
def test_range_partitioning_setter_w_none(self):
object_under_test = self._get_target_class()()
object_under_test.range_partitioning = None
assert object_under_test.range_partitioning is None
def test_range_partitioning_setter_w_wrong_type(self):
object_under_test = self._get_target_class()()
with pytest.raises(ValueError, match="RangePartitioning"):
object_under_test.range_partitioning = object()
def test_time_partitioning(self):
from google.cloud.bigquery import table
time_partitioning = table.TimePartitioning(
type_=table.TimePartitioningType.DAY, field="name"
)
config = self._make_one()
config.time_partitioning = time_partitioning
        # TimePartitioning should remain mutable after being assigned to the config.
time_partitioning.expiration_ms = 10000
self.assertEqual(config.time_partitioning.type_, table.TimePartitioningType.DAY)
self.assertEqual(config.time_partitioning.field, "name")
self.assertEqual(config.time_partitioning.expiration_ms, 10000)
config.time_partitioning = None
self.assertIsNone(config.time_partitioning)
def test_clustering_fields(self):
fields = ["email", "postal_code"]
config = self._get_target_class()()
config.clustering_fields = fields
self.assertEqual(config.clustering_fields, fields)
config.clustering_fields = None
self.assertIsNone(config.clustering_fields)
def test_from_api_repr_empty(self):
klass = self._get_target_class()
config = klass.from_api_repr({})
self.assertIsNone(config.dry_run)
self.assertIsNone(config.use_legacy_sql)
self.assertIsNone(config.default_dataset)
self.assertIsNone(config.destination)
self.assertIsNone(config.destination_encryption_configuration)
def test_from_api_repr_normal(self):
from google.cloud.bigquery.dataset import DatasetReference
resource = {
"query": {
"useLegacySql": True,
"query": "no property for me",
"defaultDataset": {
"projectId": "someproject",
"datasetId": "somedataset",
},
"someNewProperty": "I should be saved, too.",
},
"dryRun": True,
}
klass = self._get_target_class()
config = klass.from_api_repr(resource)
self.assertTrue(config.use_legacy_sql)
self.assertEqual(
config.default_dataset, DatasetReference("someproject", "somedataset")
)
self.assertTrue(config.dry_run)
# Make sure unknown properties propagate.
self.assertEqual(config._properties["query"]["query"], "no property for me")
self.assertEqual(
config._properties["query"]["someNewProperty"], "I should be saved, too."
)
def test_to_api_repr_normal(self):
from google.cloud.bigquery.dataset import DatasetReference
config = self._make_one()
config.use_legacy_sql = True
config.default_dataset = DatasetReference("someproject", "somedataset")
config.dry_run = False
config._properties["someNewProperty"] = "Woohoo, alpha stuff."
resource = config.to_api_repr()
self.assertFalse(resource["dryRun"])
self.assertTrue(resource["query"]["useLegacySql"])
self.assertEqual(
resource["query"]["defaultDataset"]["projectId"], "someproject"
)
self.assertEqual(
resource["query"]["defaultDataset"]["datasetId"], "somedataset"
)
# Make sure unknown properties propagate.
self.assertEqual(resource["someNewProperty"], "Woohoo, alpha stuff.")
def test_to_api_repr_with_encryption(self):
from google.cloud.bigquery.encryption_configuration import (
EncryptionConfiguration,
)
config = self._make_one()
config.destination_encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME
)
resource = config.to_api_repr()
self.assertEqual(
resource,
{
"query": {
"destinationEncryptionConfiguration": {
"kmsKeyName": self.KMS_KEY_NAME
}
}
},
)
def test_to_api_repr_with_encryption_none(self):
config = self._make_one()
config.destination_encryption_configuration = None
resource = config.to_api_repr()
self.assertEqual(
resource, {"query": {"destinationEncryptionConfiguration": None}}
)
def test_from_api_repr_with_encryption(self):
resource = {
"query": {
"destinationEncryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME}
}
}
klass = self._get_target_class()
config = klass.from_api_repr(resource)
self.assertEqual(
config.destination_encryption_configuration.kms_key_name, self.KMS_KEY_NAME
)
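# Illustrative sketch (not part of the test suite): how a QueryJobConfig like
# the ones above is typically passed to Client.query. The project and dataset
# names are hypothetical placeholders.
def _sketch_query_job_config_usage():
    from google.cloud import bigquery

    client = bigquery.Client(project="example-project")
    config = bigquery.QueryJobConfig(
        use_legacy_sql=False,
        default_dataset="example-project.example_dataset",
    )
    query_job = client.query("SELECT 1", job_config=config)
    # Iterating the result waits for the query to finish and pages the rows.
    return list(query_job.result())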
class TestQueryJob(unittest.TestCase, _Base):
JOB_TYPE = "query"
QUERY = "select count(*) from persons"
DESTINATION_TABLE = "destination_table"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryJob
return QueryJob
def _make_resource(self, started=False, ended=False):
resource = super(TestQueryJob, self)._make_resource(started, ended)
config = resource["configuration"]["query"]
config["query"] = self.QUERY
if ended:
resource["status"] = {"state": "DONE"}
return resource
def _verifyBooleanResourceProperties(self, job, config):
if "allowLargeResults" in config:
self.assertEqual(job.allow_large_results, config["allowLargeResults"])
else:
self.assertIsNone(job.allow_large_results)
if "flattenResults" in config:
self.assertEqual(job.flatten_results, config["flattenResults"])
else:
self.assertIsNone(job.flatten_results)
if "useQueryCache" in config:
self.assertEqual(job.use_query_cache, config["useQueryCache"])
else:
self.assertIsNone(job.use_query_cache)
if "useLegacySql" in config:
self.assertEqual(job.use_legacy_sql, config["useLegacySql"])
else:
self.assertIsNone(job.use_legacy_sql)
def _verifyIntegerResourceProperties(self, job, config):
if "maximumBillingTier" in config:
self.assertEqual(job.maximum_billing_tier, config["maximumBillingTier"])
else:
self.assertIsNone(job.maximum_billing_tier)
if "maximumBytesBilled" in config:
self.assertEqual(
str(job.maximum_bytes_billed), config["maximumBytesBilled"]
)
self.assertIsInstance(job.maximum_bytes_billed, int)
else:
self.assertIsNone(job.maximum_bytes_billed)
def _verify_udf_resources(self, job, config):
udf_resources = config.get("userDefinedFunctionResources", ())
self.assertEqual(len(job.udf_resources), len(udf_resources))
for found, expected in zip(job.udf_resources, udf_resources):
if "resourceUri" in expected:
self.assertEqual(found.udf_type, "resourceUri")
self.assertEqual(found.value, expected["resourceUri"])
else:
self.assertEqual(found.udf_type, "inlineCode")
self.assertEqual(found.value, expected["inlineCode"])
def _verifyQueryParameters(self, job, config):
query_parameters = config.get("queryParameters", ())
self.assertEqual(len(job.query_parameters), len(query_parameters))
for found, expected in zip(job.query_parameters, query_parameters):
self.assertEqual(found.to_api_repr(), expected)
def _verify_table_definitions(self, job, config):
table_defs = config.get("tableDefinitions")
if job.table_definitions is None:
self.assertIsNone(table_defs)
else:
self.assertEqual(len(job.table_definitions), len(table_defs))
for found_key, found_ec in job.table_definitions.items():
expected_ec = table_defs.get(found_key)
self.assertIsNotNone(expected_ec)
self.assertEqual(found_ec.to_api_repr(), expected_ec)
def _verify_configuration_properties(self, job, configuration):
if "dryRun" in configuration:
self.assertEqual(job.dry_run, configuration["dryRun"])
else:
self.assertIsNone(job.dry_run)
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
configuration = resource.get("configuration", {})
self._verify_configuration_properties(job, configuration)
query_config = resource.get("configuration", {}).get("query")
self._verifyBooleanResourceProperties(job, query_config)
self._verifyIntegerResourceProperties(job, query_config)
self._verify_udf_resources(job, query_config)
self._verifyQueryParameters(job, query_config)
self._verify_table_definitions(job, query_config)
self.assertEqual(job.query, query_config["query"])
if "createDisposition" in query_config:
self.assertEqual(job.create_disposition, query_config["createDisposition"])
else:
self.assertIsNone(job.create_disposition)
if "defaultDataset" in query_config:
ds_ref = job.default_dataset
ds_ref = {"projectId": ds_ref.project, "datasetId": ds_ref.dataset_id}
self.assertEqual(ds_ref, query_config["defaultDataset"])
else:
self.assertIsNone(job.default_dataset)
if "destinationTable" in query_config:
table = job.destination
tb_ref = {
"projectId": table.project,
"datasetId": table.dataset_id,
"tableId": table.table_id,
}
self.assertEqual(tb_ref, query_config["destinationTable"])
else:
self.assertIsNone(job.destination)
if "priority" in query_config:
self.assertEqual(job.priority, query_config["priority"])
else:
self.assertIsNone(job.priority)
if "writeDisposition" in query_config:
self.assertEqual(job.write_disposition, query_config["writeDisposition"])
else:
self.assertIsNone(job.write_disposition)
if "destinationEncryptionConfiguration" in query_config:
self.assertIsNotNone(job.destination_encryption_configuration)
self.assertEqual(
job.destination_encryption_configuration.kms_key_name,
query_config["destinationEncryptionConfiguration"]["kmsKeyName"],
)
else:
self.assertIsNone(job.destination_encryption_configuration)
if "schemaUpdateOptions" in query_config:
self.assertEqual(
job.schema_update_options, query_config["schemaUpdateOptions"]
)
else:
self.assertIsNone(job.schema_update_options)
def test_ctor_defaults(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.query, self.QUERY)
self.assertIs(job._client, client)
self.assertEqual(job.job_type, self.JOB_TYPE)
self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID))
self._verifyInitialReadonlyProperties(job)
self.assertFalse(job.use_legacy_sql)
# set/read from resource['configuration']['query']
self.assertIsNone(job.allow_large_results)
self.assertIsNone(job.create_disposition)
self.assertIsNone(job.default_dataset)
self.assertIsNone(job.destination)
self.assertIsNone(job.flatten_results)
self.assertIsNone(job.priority)
self.assertIsNone(job.use_query_cache)
self.assertIsNone(job.dry_run)
self.assertIsNone(job.write_disposition)
self.assertIsNone(job.maximum_billing_tier)
self.assertIsNone(job.maximum_bytes_billed)
self.assertIsNone(job.table_definitions)
self.assertIsNone(job.destination_encryption_configuration)
self.assertIsNone(job.range_partitioning)
self.assertIsNone(job.time_partitioning)
self.assertIsNone(job.clustering_fields)
self.assertIsNone(job.schema_update_options)
def test_ctor_w_udf_resources(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import UDFResource
RESOURCE_URI = "gs://some-bucket/js/lib.js"
udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
client = _make_client(project=self.PROJECT)
config = QueryJobConfig()
config.udf_resources = udf_resources
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
self.assertEqual(job.udf_resources, udf_resources)
def test_ctor_w_query_parameters(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
client = _make_client(project=self.PROJECT)
config = QueryJobConfig(query_parameters=query_parameters)
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
self.assertEqual(job.query_parameters, query_parameters)
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_missing_config(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {"query": {"query": self.QUERY}},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_with_encryption(self):
self._setUpConstants()
client = _make_client(project=self.PROJECT)
RESOURCE = {
"id": self.JOB_ID,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"destinationEncryptionConfiguration": {
"kmsKeyName": self.KMS_KEY_NAME
},
}
},
}
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_from_api_repr_w_properties(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
client = _make_client(project=self.PROJECT)
RESOURCE = self._make_resource()
query_config = RESOURCE["configuration"]["query"]
query_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED
query_config["writeDisposition"] = WriteDisposition.WRITE_TRUNCATE
query_config["destinationTable"] = {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.DESTINATION_TABLE,
}
query_config["schemaUpdateOptions"] = [SchemaUpdateOption.ALLOW_FIELD_ADDITION]
klass = self._get_target_class()
job = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(job._client, client)
self._verifyResourceProperties(job, RESOURCE)
def test_cancelled(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job._properties["status"] = {
"state": "DONE",
"errorResult": {"reason": "stopped"},
}
self.assertTrue(job.cancelled())
def test_done(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
self.assertTrue(job.done())
def test_done_w_timeout(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=False)
job = self._get_target_class().from_api_repr(resource, client)
with mock.patch.object(
client, "_get_query_results"
) as fake_get_results, mock.patch.object(job, "reload") as fake_reload:
job.done(timeout=42)
fake_get_results.assert_called_once()
call_args = fake_get_results.call_args
self.assertEqual(call_args.kwargs.get("timeout"), 42)
call_args = fake_reload.call_args
self.assertEqual(call_args.kwargs.get("timeout"), 42)
def test_done_w_timeout_and_shorter_internal_api_timeout(self):
from google.cloud.bigquery.job import _TIMEOUT_BUFFER_SECS
from google.cloud.bigquery.job import _SERVER_TIMEOUT_MARGIN_SECS
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=False)
job = self._get_target_class().from_api_repr(resource, client)
job._done_timeout = 8.8
with mock.patch.object(
client, "_get_query_results"
) as fake_get_results, mock.patch.object(job, "reload") as fake_reload:
job.done(timeout=42)
        # The expected timeout is the job's own done_timeout minus the fixed
        # buffer (bigquery.job._TIMEOUT_BUFFER_SECS), plus the safety margin
        # allowed on top of the server-side processing timeout
        # (_SERVER_TIMEOUT_MARGIN_SECS); that value is used because it is
        # smaller than the caller-supplied timeout of 42 seconds.
expected_timeout = 8.8 - _TIMEOUT_BUFFER_SECS + _SERVER_TIMEOUT_MARGIN_SECS
fake_get_results.assert_called_once()
call_args = fake_get_results.call_args
self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout)
call_args = fake_reload.call_args
self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout)
def test_done_w_timeout_and_longer_internal_api_timeout(self):
client = _make_client(project=self.PROJECT)
resource = self._make_resource(ended=False)
job = self._get_target_class().from_api_repr(resource, client)
job._done_timeout = 8.8
with mock.patch.object(
client, "_get_query_results"
) as fake_get_results, mock.patch.object(job, "reload") as fake_reload:
job.done(timeout=5.5)
# The expected timeout used is simply the given timeout, as the latter
# is shorter than the job's internal done timeout.
expected_timeout = 5.5
fake_get_results.assert_called_once()
call_args = fake_get_results.call_args
self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout)
call_args = fake_reload.call_args
self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout)
def test_query_plan(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud.bigquery.job import QueryPlanEntry
from google.cloud.bigquery.job import QueryPlanEntryStep
plan_entries = [
{
"name": "NAME",
"id": "1234",
"inputStages": ["88", "101"],
"startMs": "1522540800000",
"endMs": "1522540804000",
"parallelInputs": "1000",
"completedParallelInputs": "5",
"waitMsAvg": "33",
"waitMsMax": "400",
"waitRatioAvg": 2.71828,
"waitRatioMax": 3.14159,
"readMsAvg": "45",
"readMsMax": "90",
"readRatioAvg": 1.41421,
"readRatioMax": 1.73205,
"computeMsAvg": "55",
"computeMsMax": "99",
"computeRatioAvg": 0.69315,
"computeRatioMax": 1.09861,
"writeMsAvg": "203",
"writeMsMax": "340",
"writeRatioAvg": 3.32193,
"writeRatioMax": 2.30258,
"recordsRead": "100",
"recordsWritten": "1",
"status": "STATUS",
"shuffleOutputBytes": "1024",
"shuffleOutputBytesSpilled": "1",
"steps": [{"kind": "KIND", "substeps": ["SUBSTEP1", "SUBSTEP2"]}],
}
]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.query_plan, [])
statistics = job._properties["statistics"] = {}
self.assertEqual(job.query_plan, [])
query_stats = statistics["query"] = {}
self.assertEqual(job.query_plan, [])
query_stats["queryPlan"] = plan_entries
self.assertEqual(len(job.query_plan), len(plan_entries))
for found, expected in zip(job.query_plan, plan_entries):
self.assertIsInstance(found, QueryPlanEntry)
self.assertEqual(found.name, expected["name"])
self.assertEqual(found.entry_id, expected["id"])
self.assertEqual(len(found.input_stages), len(expected["inputStages"]))
for f_id in found.input_stages:
self.assertIn(f_id, [int(e) for e in expected["inputStages"]])
self.assertEqual(
found.start.strftime(_RFC3339_MICROS), "2018-04-01T00:00:00.000000Z"
)
self.assertEqual(
found.end.strftime(_RFC3339_MICROS), "2018-04-01T00:00:04.000000Z"
)
self.assertEqual(found.parallel_inputs, int(expected["parallelInputs"]))
self.assertEqual(
found.completed_parallel_inputs,
int(expected["completedParallelInputs"]),
)
self.assertEqual(found.wait_ms_avg, int(expected["waitMsAvg"]))
self.assertEqual(found.wait_ms_max, int(expected["waitMsMax"]))
self.assertEqual(found.wait_ratio_avg, expected["waitRatioAvg"])
self.assertEqual(found.wait_ratio_max, expected["waitRatioMax"])
self.assertEqual(found.read_ms_avg, int(expected["readMsAvg"]))
self.assertEqual(found.read_ms_max, int(expected["readMsMax"]))
self.assertEqual(found.read_ratio_avg, expected["readRatioAvg"])
self.assertEqual(found.read_ratio_max, expected["readRatioMax"])
self.assertEqual(found.compute_ms_avg, int(expected["computeMsAvg"]))
self.assertEqual(found.compute_ms_max, int(expected["computeMsMax"]))
self.assertEqual(found.compute_ratio_avg, expected["computeRatioAvg"])
self.assertEqual(found.compute_ratio_max, expected["computeRatioMax"])
self.assertEqual(found.write_ms_avg, int(expected["writeMsAvg"]))
self.assertEqual(found.write_ms_max, int(expected["writeMsMax"]))
self.assertEqual(found.write_ratio_avg, expected["writeRatioAvg"])
self.assertEqual(found.write_ratio_max, expected["writeRatioMax"])
self.assertEqual(found.records_read, int(expected["recordsRead"]))
self.assertEqual(found.records_written, int(expected["recordsWritten"]))
self.assertEqual(found.status, expected["status"])
self.assertEqual(
found.shuffle_output_bytes, int(expected["shuffleOutputBytes"])
)
self.assertEqual(
found.shuffle_output_bytes_spilled,
int(expected["shuffleOutputBytesSpilled"]),
)
self.assertEqual(len(found.steps), len(expected["steps"]))
for f_step, e_step in zip(found.steps, expected["steps"]):
self.assertIsInstance(f_step, QueryPlanEntryStep)
self.assertEqual(f_step.kind, e_step["kind"])
self.assertEqual(f_step.substeps, e_step["substeps"])
def test_total_bytes_processed(self):
total_bytes = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.total_bytes_processed)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.total_bytes_processed)
query_stats = statistics["query"] = {}
self.assertIsNone(job.total_bytes_processed)
query_stats["totalBytesProcessed"] = str(total_bytes)
self.assertEqual(job.total_bytes_processed, total_bytes)
def test_total_bytes_billed(self):
total_bytes = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.total_bytes_billed)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.total_bytes_billed)
query_stats = statistics["query"] = {}
self.assertIsNone(job.total_bytes_billed)
query_stats["totalBytesBilled"] = str(total_bytes)
self.assertEqual(job.total_bytes_billed, total_bytes)
def test_billing_tier(self):
billing_tier = 1
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.billing_tier)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.billing_tier)
query_stats = statistics["query"] = {}
self.assertIsNone(job.billing_tier)
query_stats["billingTier"] = billing_tier
self.assertEqual(job.billing_tier, billing_tier)
def test_cache_hit(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.cache_hit)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.cache_hit)
query_stats = statistics["query"] = {}
self.assertIsNone(job.cache_hit)
query_stats["cacheHit"] = True
self.assertTrue(job.cache_hit)
def test_ddl_operation_performed(self):
op = "SKIP"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.ddl_operation_performed)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.ddl_operation_performed)
query_stats = statistics["query"] = {}
self.assertIsNone(job.ddl_operation_performed)
query_stats["ddlOperationPerformed"] = op
self.assertEqual(job.ddl_operation_performed, op)
def test_ddl_target_routine(self):
from google.cloud.bigquery.routine import RoutineReference
ref_routine = {
"projectId": self.PROJECT,
"datasetId": "ddl_ds",
"routineId": "targetroutine",
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.ddl_target_routine)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.ddl_target_routine)
query_stats = statistics["query"] = {}
self.assertIsNone(job.ddl_target_routine)
query_stats["ddlTargetRoutine"] = ref_routine
self.assertIsInstance(job.ddl_target_routine, RoutineReference)
self.assertEqual(job.ddl_target_routine.routine_id, "targetroutine")
self.assertEqual(job.ddl_target_routine.dataset_id, "ddl_ds")
self.assertEqual(job.ddl_target_routine.project, self.PROJECT)
def test_ddl_target_table(self):
from google.cloud.bigquery.table import TableReference
ref_table = {
"projectId": self.PROJECT,
"datasetId": "ddl_ds",
"tableId": "targettable",
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.ddl_target_table)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.ddl_target_table)
query_stats = statistics["query"] = {}
self.assertIsNone(job.ddl_target_table)
query_stats["ddlTargetTable"] = ref_table
self.assertIsInstance(job.ddl_target_table, TableReference)
self.assertEqual(job.ddl_target_table.table_id, "targettable")
self.assertEqual(job.ddl_target_table.dataset_id, "ddl_ds")
self.assertEqual(job.ddl_target_table.project, self.PROJECT)
def test_num_dml_affected_rows(self):
num_rows = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.num_dml_affected_rows)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.num_dml_affected_rows)
query_stats = statistics["query"] = {}
self.assertIsNone(job.num_dml_affected_rows)
query_stats["numDmlAffectedRows"] = str(num_rows)
self.assertEqual(job.num_dml_affected_rows, num_rows)
def test_slot_millis(self):
millis = 1234
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.slot_millis)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.slot_millis)
query_stats = statistics["query"] = {}
self.assertIsNone(job.slot_millis)
query_stats["totalSlotMs"] = millis
self.assertEqual(job.slot_millis, millis)
def test_statement_type(self):
statement_type = "SELECT"
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.statement_type)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.statement_type)
query_stats = statistics["query"] = {}
self.assertIsNone(job.statement_type)
query_stats["statementType"] = statement_type
self.assertEqual(job.statement_type, statement_type)
def test_referenced_tables(self):
from google.cloud.bigquery.table import TableReference
ref_tables_resource = [
{"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local1"},
{"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local2"},
{
"projectId": "other-project-123",
"datasetId": "other-dataset",
"tableId": "other-table",
},
]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.referenced_tables, [])
statistics = job._properties["statistics"] = {}
self.assertEqual(job.referenced_tables, [])
query_stats = statistics["query"] = {}
self.assertEqual(job.referenced_tables, [])
query_stats["referencedTables"] = ref_tables_resource
local1, local2, remote = job.referenced_tables
self.assertIsInstance(local1, TableReference)
self.assertEqual(local1.table_id, "local1")
self.assertEqual(local1.dataset_id, "dataset")
self.assertEqual(local1.project, self.PROJECT)
self.assertIsInstance(local2, TableReference)
self.assertEqual(local2.table_id, "local2")
self.assertEqual(local2.dataset_id, "dataset")
self.assertEqual(local2.project, self.PROJECT)
self.assertIsInstance(remote, TableReference)
self.assertEqual(remote.table_id, "other-table")
self.assertEqual(remote.dataset_id, "other-dataset")
self.assertEqual(remote.project, "other-project-123")
def test_timeline(self):
timeline_resource = [
{
"elapsedMs": 1,
"activeUnits": 22,
"pendingUnits": 33,
"completedUnits": 44,
"totalSlotMs": 101,
}
]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.timeline, [])
statistics = job._properties["statistics"] = {}
self.assertEqual(job.timeline, [])
query_stats = statistics["query"] = {}
self.assertEqual(job.timeline, [])
query_stats["timeline"] = timeline_resource
self.assertEqual(len(job.timeline), len(timeline_resource))
self.assertEqual(job.timeline[0].elapsed_ms, 1)
self.assertEqual(job.timeline[0].active_units, 22)
self.assertEqual(job.timeline[0].pending_units, 33)
self.assertEqual(job.timeline[0].completed_units, 44)
self.assertEqual(job.timeline[0].slot_millis, 101)
def test_undeclared_query_parameters(self):
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
undeclared = [
{
"name": "my_scalar",
"parameterType": {"type": "STRING"},
"parameterValue": {"value": "value"},
},
{
"name": "my_array",
"parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}},
"parameterValue": {
"arrayValues": [{"value": "1066"}, {"value": "1745"}]
},
},
{
"name": "my_struct",
"parameterType": {
"type": "STRUCT",
"structTypes": [{"name": "count", "type": {"type": "INT64"}}],
},
"parameterValue": {"structValues": {"count": {"value": "123"}}},
},
]
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertEqual(job.undeclared_query_parameters, [])
statistics = job._properties["statistics"] = {}
self.assertEqual(job.undeclared_query_parameters, [])
query_stats = statistics["query"] = {}
self.assertEqual(job.undeclared_query_parameters, [])
query_stats["undeclaredQueryParameters"] = undeclared
scalar, array, struct = job.undeclared_query_parameters
self.assertIsInstance(scalar, ScalarQueryParameter)
self.assertEqual(scalar.name, "my_scalar")
self.assertEqual(scalar.type_, "STRING")
self.assertEqual(scalar.value, "value")
self.assertIsInstance(array, ArrayQueryParameter)
self.assertEqual(array.name, "my_array")
self.assertEqual(array.array_type, "INT64")
self.assertEqual(array.values, [1066, 1745])
self.assertIsInstance(struct, StructQueryParameter)
self.assertEqual(struct.name, "my_struct")
self.assertEqual(struct.struct_types, {"count": "INT64"})
self.assertEqual(struct.struct_values, {"count": 123})
def test_estimated_bytes_processed(self):
est_bytes = 123456
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsNone(job.estimated_bytes_processed)
statistics = job._properties["statistics"] = {}
self.assertIsNone(job.estimated_bytes_processed)
query_stats = statistics["query"] = {}
self.assertIsNone(job.estimated_bytes_processed)
query_stats["estimatedBytesProcessed"] = str(est_bytes)
self.assertEqual(job.estimated_bytes_processed, est_bytes)
def test_result(self):
from google.cloud.bigquery.table import RowIterator
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
"totalRows": "2",
}
tabledata_resource = {
            # Explicitly set totalRows to be different from the query response
            # to test that the value is updated during iteration.
"totalRows": "1",
"pageToken": None,
"rows": [{"f": [{"v": "abc"}]}],
}
connection = _make_connection(query_resource, tabledata_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertIsInstance(result, RowIterator)
self.assertEqual(result.total_rows, 2)
rows = list(result)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].col1, "abc")
# Test that the total_rows property has changed during iteration, based
# on the response from tabledata.list.
self.assertEqual(result.total_rows, 1)
def test_result_with_max_results(self):
from google.cloud.bigquery.table import RowIterator
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
"totalRows": "5",
}
tabledata_resource = {
"totalRows": "5",
"pageToken": None,
"rows": [
{"f": [{"v": "abc"}]},
{"f": [{"v": "def"}]},
{"f": [{"v": "ghi"}]},
],
}
connection = _make_connection(query_resource, tabledata_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
max_results = 3
result = job.result(max_results=max_results)
self.assertIsInstance(result, RowIterator)
self.assertEqual(result.total_rows, 5)
rows = list(result)
self.assertEqual(len(rows), 3)
self.assertEqual(len(connection.api_request.call_args_list), 2)
tabledata_list_request = connection.api_request.call_args_list[1]
self.assertEqual(
tabledata_list_request[1]["query_params"]["maxResults"], max_results
)
def test_result_w_empty_schema(self):
from google.cloud.bigquery.table import _EmptyRowIterator
# Destination table may have no schema for some DDL and DML queries.
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": []},
}
connection = _make_connection(query_resource, query_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
result = job.result()
self.assertIsInstance(result, _EmptyRowIterator)
self.assertEqual(list(result), [])
def test_result_invokes_begins(self):
begun_resource = self._make_resource()
incomplete_resource = {
"jobComplete": False,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
}
query_resource = copy.deepcopy(incomplete_resource)
query_resource["jobComplete"] = True
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(
begun_resource,
incomplete_resource,
query_resource,
done_resource,
query_resource,
)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job.result()
self.assertEqual(len(connection.api_request.call_args_list), 4)
begin_request = connection.api_request.call_args_list[0]
query_request = connection.api_request.call_args_list[2]
reload_request = connection.api_request.call_args_list[3]
self.assertEqual(begin_request[1]["method"], "POST")
self.assertEqual(query_request[1]["method"], "GET")
self.assertEqual(reload_request[1]["method"], "GET")
def test_result_w_timeout(self):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(begun_resource, query_resource, done_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
with freezegun.freeze_time("1970-01-01 00:00:00", tick=False):
job.result(timeout=1.0)
self.assertEqual(len(connection.api_request.call_args_list), 3)
begin_request = connection.api_request.call_args_list[0]
query_request = connection.api_request.call_args_list[1]
reload_request = connection.api_request.call_args_list[2]
self.assertEqual(begin_request[1]["method"], "POST")
self.assertEqual(query_request[1]["method"], "GET")
self.assertEqual(
query_request[1]["path"],
"/projects/{}/queries/{}".format(self.PROJECT, self.JOB_ID),
)
self.assertEqual(query_request[1]["query_params"]["timeoutMs"], 900)
self.assertEqual(reload_request[1]["method"], "GET")
@mock.patch("google.api_core.future.polling.PollingFuture.result")
    def test_result_splitting_timeout_between_requests(self, polling_result):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
"totalRows": "5",
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(begun_resource, query_resource, done_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
client.list_rows = mock.Mock()
with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen_time:
def delayed_result(*args, **kwargs):
frozen_time.tick(delta=0.8)
polling_result.side_effect = delayed_result
def delayed_get_results(*args, **kwargs):
frozen_time.tick(delta=0.5)
return orig_get_results(*args, **kwargs)
orig_get_results = client._get_query_results
client._get_query_results = mock.Mock(side_effect=delayed_get_results)
job.result(timeout=2.0)
polling_result.assert_called_once_with(timeout=2.0)
client._get_query_results.assert_called_once()
_, kwargs = client._get_query_results.call_args
self.assertAlmostEqual(kwargs.get("timeout"), 1.2)
client.list_rows.assert_called_once()
_, kwargs = client.list_rows.call_args
self.assertAlmostEqual(kwargs.get("timeout"), 0.7)
def test_result_w_page_size(self):
# Arrange
query_results_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
"totalRows": "4",
}
job_resource = self._make_resource(started=True, ended=True)
q_config = job_resource["configuration"]["query"]
q_config["destinationTable"] = {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
}
tabledata_resource = {
"totalRows": 4,
"pageToken": "some-page-token",
"rows": [
{"f": [{"v": "row1"}]},
{"f": [{"v": "row2"}]},
{"f": [{"v": "row3"}]},
],
}
tabledata_resource_page_2 = {"totalRows": 4, "rows": [{"f": [{"v": "row4"}]}]}
conn = _make_connection(
query_results_resource, tabledata_resource, tabledata_resource_page_2
)
client = _make_client(self.PROJECT, connection=conn)
job = self._get_target_class().from_api_repr(job_resource, client)
# Act
result = job.result(page_size=3)
# Assert
actual_rows = list(result)
self.assertEqual(len(actual_rows), 4)
tabledata_path = "/projects/%s/datasets/%s/tables/%s/data" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
conn.api_request.assert_has_calls(
[
mock.call(
method="GET",
path=tabledata_path,
query_params={"maxResults": 3},
timeout=None,
),
mock.call(
method="GET",
path=tabledata_path,
query_params={"pageToken": "some-page-token", "maxResults": 3},
timeout=None,
),
]
)
def test_result_error(self):
from google.cloud import exceptions
query = textwrap.dedent(
"""
SELECT foo, bar
FROM table_baz
WHERE foo == bar"""
)
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, query, client)
error_result = {
"debugInfo": "DEBUG",
"location": "LOCATION",
"message": "MESSAGE",
"reason": "invalid",
}
job._properties["status"] = {
"errorResult": error_result,
"errors": [error_result],
"state": "DONE",
}
job._set_future_result()
with self.assertRaises(exceptions.GoogleCloudError) as exc_info:
job.result()
self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError)
self.assertEqual(exc_info.exception.code, http_client.BAD_REQUEST)
exc_job_instance = getattr(exc_info.exception, "query_job", None)
self.assertIs(exc_job_instance, job)
full_text = str(exc_info.exception)
assert job.job_id in full_text
assert "Query Job SQL Follows" in full_text
for i, line in enumerate(query.splitlines(), start=1):
expected_line = "{}:{}".format(i, line)
assert expected_line in full_text
def test_result_transport_timeout_error(self):
query = textwrap.dedent(
"""
SELECT foo, bar
FROM table_baz
WHERE foo == bar"""
)
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, query, client)
call_api_patch = mock.patch(
"google.cloud.bigquery.client.Client._call_api",
autospec=True,
side_effect=requests.exceptions.Timeout("Server response took too long."),
)
# Make sure that timeout errors get rebranded to concurrent futures timeout.
with call_api_patch, self.assertRaises(concurrent.futures.TimeoutError):
job.result(timeout=1)
def test__begin_error(self):
from google.cloud import exceptions
query = textwrap.dedent(
"""
SELECT foo, bar
FROM table_baz
WHERE foo == bar"""
)
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, query, client)
call_api_patch = mock.patch(
"google.cloud.bigquery.client.Client._call_api",
autospec=True,
side_effect=exceptions.BadRequest("Syntax error in SQL query"),
)
with call_api_patch, self.assertRaises(exceptions.GoogleCloudError) as exc_info:
job.result()
self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError)
self.assertEqual(exc_info.exception.code, http_client.BAD_REQUEST)
exc_job_instance = getattr(exc_info.exception, "query_job", None)
self.assertIs(exc_job_instance, job)
full_text = str(exc_info.exception)
assert job.job_id in full_text
assert "Query Job SQL Follows" in full_text
for i, line in enumerate(query.splitlines(), start=1):
expected_line = "{}:{}".format(i, line)
assert expected_line in full_text
def test__begin_w_timeout(self):
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job._begin(timeout=7.5)
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {"query": self.QUERY, "useLegacySql": False}
},
},
timeout=7.5,
)
def test_begin_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import QueryJobConfig
PATH = "/projects/%s/jobs" % (self.PROJECT,)
DS_ID = "DATASET"
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.default_dataset = DatasetReference(self.PROJECT, DS_ID)
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertIsNone(job.default_dataset)
self.assertEqual(job.udf_resources, [])
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"useLegacySql": False,
"defaultDataset": {
"projectId": self.PROJECT,
"datasetId": DS_ID,
},
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.job import QueryPriority
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import WriteDisposition
PATH = "/projects/%s/jobs" % (self.PROJECT,)
TABLE = "TABLE"
DS_ID = "DATASET"
RESOURCE = self._make_resource(ended=True)
QUERY_CONFIGURATION = {
"query": self.QUERY,
"allowLargeResults": True,
"createDisposition": CreateDisposition.CREATE_NEVER,
"defaultDataset": {"projectId": self.PROJECT, "datasetId": DS_ID},
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": DS_ID,
"tableId": TABLE,
},
"flattenResults": True,
"priority": QueryPriority.INTERACTIVE,
"useQueryCache": True,
"useLegacySql": True,
"writeDisposition": WriteDisposition.WRITE_TRUNCATE,
"maximumBillingTier": 4,
"maximumBytesBilled": "123456",
"schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_RELAXATION],
}
RESOURCE["configuration"]["query"] = QUERY_CONFIGURATION
RESOURCE["configuration"]["dryRun"] = True
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
dataset_ref = DatasetReference(self.PROJECT, DS_ID)
table_ref = dataset_ref.table(TABLE)
config = QueryJobConfig()
config.allow_large_results = True
config.create_disposition = CreateDisposition.CREATE_NEVER
config.default_dataset = dataset_ref
config.destination = table_ref
config.dry_run = True
config.flatten_results = True
config.maximum_billing_tier = 4
config.priority = QueryPriority.INTERACTIVE
config.use_legacy_sql = True
config.use_query_cache = True
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
config.maximum_bytes_billed = 123456
config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_RELAXATION]
job = self._make_one(self.JOB_ID, self.QUERY, client1, job_config=config)
job._begin(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {"dryRun": True, "query": QUERY_CONFIGURATION},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_udf(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import UDFResource
RESOURCE_URI = "gs://some-bucket/js/lib.js"
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
RESOURCE["configuration"]["query"]["userDefinedFunctionResources"] = [
{"resourceUri": RESOURCE_URI},
{"inlineCode": INLINE_UDF_CODE},
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
udf_resources = [
UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE),
]
config = QueryJobConfig()
config.udf_resources = udf_resources
config.use_legacy_sql = True
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertEqual(job.udf_resources, udf_resources)
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"useLegacySql": True,
"userDefinedFunctionResources": [
{"resourceUri": RESOURCE_URI},
{"inlineCode": INLINE_UDF_CODE},
],
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_named_query_parameter(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
config = RESOURCE["configuration"]["query"]
config["parameterMode"] = "NAMED"
config["queryParameters"] = [
{
"name": "foo",
"parameterType": {"type": "INT64"},
"parameterValue": {"value": "123"},
}
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
jconfig = QueryJobConfig()
jconfig.query_parameters = query_parameters
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig)
job._begin()
self.assertEqual(job.query_parameters, query_parameters)
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"useLegacySql": False,
"parameterMode": "NAMED",
"queryParameters": config["queryParameters"],
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_positional_query_parameter(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
query_parameters = [ScalarQueryParameter.positional("INT64", 123)]
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
config = RESOURCE["configuration"]["query"]
config["parameterMode"] = "POSITIONAL"
config["queryParameters"] = [
{"parameterType": {"type": "INT64"}, "parameterValue": {"value": "123"}}
]
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
jconfig = QueryJobConfig()
jconfig.query_parameters = query_parameters
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig)
job._begin()
self.assertEqual(job.query_parameters, query_parameters)
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"useLegacySql": False,
"parameterMode": "POSITIONAL",
"queryParameters": config["queryParameters"],
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_table_defs(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.external_config import BigtableColumn
from google.cloud.bigquery.external_config import BigtableColumnFamily
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
bt_config = ExternalConfig("BIGTABLE")
bt_config.ignore_unknown_values = True
bt_config.options.read_rowkey_as_string = True
cf = BigtableColumnFamily()
cf.family_id = "cf"
col = BigtableColumn()
col.field_name = "fn"
cf.columns = [col]
bt_config.options.column_families = [cf]
BT_CONFIG_RESOURCE = {
"sourceFormat": "BIGTABLE",
"ignoreUnknownValues": True,
"bigtableOptions": {
"readRowkeyAsString": True,
"columnFamilies": [
{"familyId": "cf", "columns": [{"fieldName": "fn"}]}
],
},
}
CSV_CONFIG_RESOURCE = {
"sourceFormat": "CSV",
"maxBadRecords": 8,
"csvOptions": {"allowJaggedRows": True},
}
csv_config = ExternalConfig("CSV")
csv_config.max_bad_records = 8
csv_config.options.allow_jagged_rows = True
bt_table = "bigtable-table"
csv_table = "csv-table"
RESOURCE["configuration"]["query"]["tableDefinitions"] = {
bt_table: BT_CONFIG_RESOURCE,
csv_table: CSV_CONFIG_RESOURCE,
}
want_resource = copy.deepcopy(RESOURCE)
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.table_definitions = {bt_table: bt_config, csv_table: csv_config}
config.use_legacy_sql = True
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {
"query": self.QUERY,
"useLegacySql": True,
"tableDefinitions": {
bt_table: BT_CONFIG_RESOURCE,
csv_table: CSV_CONFIG_RESOURCE,
},
}
},
},
timeout=None,
)
self._verifyResourceProperties(job, want_resource)
def test_dry_run_query(self):
from google.cloud.bigquery.job import QueryJobConfig
PATH = "/projects/%s/jobs" % (self.PROJECT,)
RESOURCE = self._make_resource()
# Ensure None for missing server-set props
del RESOURCE["statistics"]["creationTime"]
del RESOURCE["etag"]
del RESOURCE["selfLink"]
del RESOURCE["user_email"]
RESOURCE["configuration"]["dryRun"] = True
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
config = QueryJobConfig()
config.dry_run = True
job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config)
job._begin()
self.assertEqual(job.udf_resources, [])
conn.api_request.assert_called_once_with(
method="POST",
path=PATH,
data={
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"configuration": {
"query": {"query": self.QUERY, "useLegacySql": False},
"dryRun": True,
},
},
timeout=None,
)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn = _make_connection()
client = _make_client(project=self.PROJECT, connection=conn)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertFalse(job.exists())
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_exists_hit_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection({})
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, self.QUERY, client1)
self.assertTrue(job.exists(client=client2))
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={"fields": "id"}, timeout=None
)
def test_reload_w_bound_client(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import QueryJobConfig
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
DS_ID = "DATASET"
DEST_TABLE = "dest_table"
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
dataset_ref = DatasetReference(self.PROJECT, DS_ID)
table_ref = dataset_ref.table(DEST_TABLE)
config = QueryJobConfig()
config.destination = table_ref
job = self._make_one(self.JOB_ID, None, client, job_config=config)
job.reload()
self.assertNotEqual(job.destination, table_ref)
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
DS_ID = "DATASET"
DEST_TABLE = "dest_table"
RESOURCE = self._make_resource()
q_config = RESOURCE["configuration"]["query"]
q_config["destinationTable"] = {
"projectId": self.PROJECT,
"datasetId": DS_ID,
"tableId": DEST_TABLE,
}
conn1 = _make_connection()
client1 = _make_client(project=self.PROJECT, connection=conn1)
conn2 = _make_connection(RESOURCE)
client2 = _make_client(project=self.PROJECT, connection=conn2)
job = self._make_one(self.JOB_ID, self.QUERY, client1)
job.reload(client=client2)
conn1.api_request.assert_not_called()
conn2.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=None
)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_timeout(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.job import QueryJobConfig
PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)
DS_ID = "DATASET"
DEST_TABLE = "dest_table"
RESOURCE = self._make_resource()
conn = _make_connection(RESOURCE)
client = _make_client(project=self.PROJECT, connection=conn)
dataset_ref = DatasetReference(self.PROJECT, DS_ID)
table_ref = dataset_ref.table(DEST_TABLE)
config = QueryJobConfig()
config.destination = table_ref
job = self._make_one(self.JOB_ID, None, client, job_config=config)
job.reload(timeout=4.2)
self.assertNotEqual(job.destination, table_ref)
conn.api_request.assert_called_once_with(
method="GET", path=PATH, query_params={}, timeout=4.2
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow(self):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "4",
"schema": {
"fields": [
{
"name": "spouse_1",
"type": "RECORD",
"fields": [
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
],
},
{
"name": "spouse_2",
"type": "RECORD",
"fields": [
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
],
},
]
},
}
tabledata_resource = {
"rows": [
{
"f": [
{"v": {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]}},
{"v": {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]}},
]
},
{
"f": [
{"v": {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]}},
{"v": {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]}},
]
},
]
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(
begun_resource, query_resource, done_resource, tabledata_resource
)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
tbl = job.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 2)
# Check the schema.
self.assertEqual(tbl.schema[0].name, "spouse_1")
self.assertEqual(tbl.schema[0].type[0].name, "name")
self.assertEqual(tbl.schema[0].type[1].name, "age")
self.assertTrue(pyarrow.types.is_struct(tbl.schema[0].type))
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type[0].type))
self.assertTrue(pyarrow.types.is_int64(tbl.schema[0].type[1].type))
self.assertEqual(tbl.schema[1].name, "spouse_2")
self.assertEqual(tbl.schema[1].type[0].name, "name")
self.assertEqual(tbl.schema[1].type[1].name, "age")
self.assertTrue(pyarrow.types.is_struct(tbl.schema[1].type))
self.assertTrue(pyarrow.types.is_string(tbl.schema[1].type[0].type))
self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type[1].type))
# Check the data.
tbl_data = tbl.to_pydict()
spouse_1 = tbl_data["spouse_1"]
self.assertEqual(
spouse_1,
[
{"name": "Phred Phlyntstone", "age": 32},
{"name": "Bhettye Rhubble", "age": 27},
],
)
spouse_2 = tbl_data["spouse_2"]
self.assertEqual(
spouse_2,
[
{"name": "Wylma Phlyntstone", "age": 29},
{"name": "Bharney Rhubble", "age": 33},
],
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe(self):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "4",
"schema": {
"fields": [
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
]
},
}
tabledata_resource = {
"rows": [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(
begun_resource, query_resource, done_resource, tabledata_resource
)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
df = job.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 4) # verify the number of rows
self.assertEqual(list(df), ["name", "age"]) # verify the column names
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_ddl_query(self):
# Destination table may have no schema for some DDL and DML queries.
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"schema": {"fields": []},
}
connection = _make_connection(query_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
df = job.to_dataframe()
self.assertEqual(len(df), 0)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_bqstorage(self):
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "4",
"schema": {
"fields": [
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
]
},
}
connection = _make_connection(query_resource)
client = _make_client(self.PROJECT, connection=connection)
resource = self._make_resource(ended=True)
job = self._get_target_class().from_api_repr(resource, client)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession()
session.avro_schema.schema = json.dumps(
{
"type": "record",
"name": "__root__",
"fields": [
{"name": "name", "type": ["null", "string"]},
{"name": "age", "type": ["null", "long"]},
],
}
)
bqstorage_client.create_read_session.return_value = session
job.to_dataframe(bqstorage_client=bqstorage_client)
bqstorage_client.create_read_session.assert_called_once_with(
mock.ANY,
"projects/{}".format(self.PROJECT),
format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
read_options=mock.ANY,
# Use default number of streams for best performance.
requested_streams=0,
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_column_dtypes(self):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "4",
"schema": {
"fields": [
{"name": "start_timestamp", "type": "TIMESTAMP"},
{"name": "seconds", "type": "INT64"},
{"name": "miles", "type": "FLOAT64"},
{"name": "km", "type": "FLOAT64"},
{"name": "payment_type", "type": "STRING"},
{"name": "complete", "type": "BOOL"},
{"name": "date", "type": "DATE"},
]
},
}
row_data = [
["1.4338368E9", "420", "1.1", "1.77", "Cash", "true", "1999-12-01"],
["1.3878117E9", "2580", "17.7", "28.5", "Cash", "false", "1953-06-14"],
["1.3855653E9", "2280", "4.4", "7.1", "Credit", "true", "1981-11-04"],
]
rows = [{"f": [{"v": field} for field in row]} for row in row_data]
query_resource["rows"] = rows
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(
begun_resource, query_resource, done_resource, query_resource
)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
df = job.to_dataframe(dtypes={"km": "float16"})
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 3) # verify the number of rows
exp_columns = [field["name"] for field in query_resource["schema"]["fields"]]
self.assertEqual(list(df), exp_columns) # verify the column names
self.assertEqual(df.start_timestamp.dtype.name, "datetime64[ns, UTC]")
self.assertEqual(df.seconds.dtype.name, "int64")
self.assertEqual(df.miles.dtype.name, "float64")
self.assertEqual(df.km.dtype.name, "float16")
self.assertEqual(df.payment_type.dtype.name, "object")
self.assertEqual(df.complete.dtype.name, "bool")
self.assertEqual(df.date.dtype.name, "object")
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(tqdm is None, "Requires `tqdm`")
@mock.patch("tqdm.tqdm")
def test_to_dataframe_with_progress_bar(self, tqdm_mock):
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "4",
"schema": {
"fields": [{"name": "name", "type": "STRING", "mode": "NULLABLE"}]
},
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(
begun_resource,
query_resource,
done_resource,
query_resource,
query_resource,
)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
job.to_dataframe(progress_bar_type=None)
tqdm_mock.assert_not_called()
job.to_dataframe(progress_bar_type="tqdm")
tqdm_mock.assert_called()
def test_iter(self):
import types
begun_resource = self._make_resource()
query_resource = {
"jobComplete": True,
"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID},
"totalRows": "0",
"schema": {"fields": [{"name": "col1", "type": "STRING"}]},
}
done_resource = copy.deepcopy(begun_resource)
done_resource["status"] = {"state": "DONE"}
connection = _make_connection(begun_resource, query_resource, done_resource)
client = _make_client(project=self.PROJECT, connection=connection)
job = self._make_one(self.JOB_ID, self.QUERY, client)
self.assertIsInstance(iter(job), types.GeneratorType)
class TestQueryPlanEntryStep(unittest.TestCase, _Base):
KIND = "KIND"
SUBSTEPS = ("SUB1", "SUB2")
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryPlanEntryStep
return QueryPlanEntryStep
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertEqual(step.kind, self.KIND)
self.assertEqual(step.substeps, list(self.SUBSTEPS))
def test_from_api_repr_empty(self):
klass = self._get_target_class()
step = klass.from_api_repr({})
self.assertIsNone(step.kind)
self.assertEqual(step.substeps, [])
def test_from_api_repr_normal(self):
resource = {"kind": self.KIND, "substeps": self.SUBSTEPS}
klass = self._get_target_class()
step = klass.from_api_repr(resource)
self.assertEqual(step.kind, self.KIND)
self.assertEqual(step.substeps, list(self.SUBSTEPS))
def test___eq___mismatched_type(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertNotEqual(step, object())
def test___eq___mismatch_kind(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one("OTHER", self.SUBSTEPS)
self.assertNotEqual(step, other)
def test___eq___mismatch_substeps(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one(self.KIND, ())
self.assertNotEqual(step, other)
def test___eq___hit(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
other = self._make_one(self.KIND, self.SUBSTEPS)
self.assertEqual(step, other)
def test___eq___wrong_type(self):
step = self._make_one(self.KIND, self.SUBSTEPS)
self.assertFalse(step == "hello")
class TestQueryPlanEntry(unittest.TestCase, _Base):
NAME = "NAME"
ENTRY_ID = 1234
START_MS = 1522540800000
END_MS = 1522540804000
INPUT_STAGES = (88, 101)
PARALLEL_INPUTS = 1000
COMPLETED_PARALLEL_INPUTS = 5
WAIT_MS_AVG = 33
WAIT_MS_MAX = 400
WAIT_RATIO_AVG = 2.71828
WAIT_RATIO_MAX = 3.14159
READ_MS_AVG = 45
READ_MS_MAX = 90
READ_RATIO_AVG = 1.41421
READ_RATIO_MAX = 1.73205
COMPUTE_MS_AVG = 55
COMPUTE_MS_MAX = 99
COMPUTE_RATIO_AVG = 0.69315
COMPUTE_RATIO_MAX = 1.09861
WRITE_MS_AVG = 203
WRITE_MS_MAX = 340
WRITE_RATIO_AVG = 3.32193
WRITE_RATIO_MAX = 2.30258
RECORDS_READ = 100
RECORDS_WRITTEN = 1
STATUS = "STATUS"
SHUFFLE_OUTPUT_BYTES = 1024
SHUFFLE_OUTPUT_BYTES_SPILLED = 1
START_RFC3339_MICROS = "2018-04-01T00:00:00.000000Z"
END_RFC3339_MICROS = "2018-04-01T00:00:04.000000Z"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import QueryPlanEntry
return QueryPlanEntry
def test_from_api_repr_empty(self):
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertIsNone(entry.name)
self.assertIsNone(entry.entry_id)
self.assertEqual(entry.input_stages, [])
self.assertIsNone(entry.start)
self.assertIsNone(entry.end)
self.assertIsNone(entry.parallel_inputs)
self.assertIsNone(entry.completed_parallel_inputs)
self.assertIsNone(entry.wait_ms_avg)
self.assertIsNone(entry.wait_ms_max)
self.assertIsNone(entry.wait_ratio_avg)
self.assertIsNone(entry.wait_ratio_max)
self.assertIsNone(entry.read_ms_avg)
self.assertIsNone(entry.read_ms_max)
self.assertIsNone(entry.read_ratio_avg)
self.assertIsNone(entry.read_ratio_max)
self.assertIsNone(entry.compute_ms_avg)
self.assertIsNone(entry.compute_ms_max)
self.assertIsNone(entry.compute_ratio_avg)
self.assertIsNone(entry.compute_ratio_max)
self.assertIsNone(entry.write_ms_avg)
self.assertIsNone(entry.write_ms_max)
self.assertIsNone(entry.write_ratio_avg)
self.assertIsNone(entry.write_ratio_max)
self.assertIsNone(entry.records_read)
self.assertIsNone(entry.records_written)
self.assertIsNone(entry.status)
self.assertIsNone(entry.shuffle_output_bytes)
self.assertIsNone(entry.shuffle_output_bytes_spilled)
self.assertEqual(entry.steps, [])
def test_from_api_repr_normal(self):
from google.cloud.bigquery.job import QueryPlanEntryStep
steps = [
QueryPlanEntryStep(
kind=TestQueryPlanEntryStep.KIND,
substeps=TestQueryPlanEntryStep.SUBSTEPS,
)
]
resource = {
"name": self.NAME,
"id": self.ENTRY_ID,
"inputStages": self.INPUT_STAGES,
"startMs": self.START_MS,
"endMs": self.END_MS,
"waitMsAvg": self.WAIT_MS_AVG,
"waitMsMax": self.WAIT_MS_MAX,
"waitRatioAvg": self.WAIT_RATIO_AVG,
"waitRatioMax": self.WAIT_RATIO_MAX,
"readMsAvg": self.READ_MS_AVG,
"readMsMax": self.READ_MS_MAX,
"readRatioAvg": self.READ_RATIO_AVG,
"readRatioMax": self.READ_RATIO_MAX,
"computeMsAvg": self.COMPUTE_MS_AVG,
"computeMsMax": self.COMPUTE_MS_MAX,
"computeRatioAvg": self.COMPUTE_RATIO_AVG,
"computeRatioMax": self.COMPUTE_RATIO_MAX,
"writeMsAvg": self.WRITE_MS_AVG,
"writeMsMax": self.WRITE_MS_MAX,
"writeRatioAvg": self.WRITE_RATIO_AVG,
"writeRatioMax": self.WRITE_RATIO_MAX,
"recordsRead": self.RECORDS_READ,
"recordsWritten": self.RECORDS_WRITTEN,
"status": self.STATUS,
"shuffleOutputBytes": self.SHUFFLE_OUTPUT_BYTES,
"shuffleOutputBytesSpilled": self.SHUFFLE_OUTPUT_BYTES_SPILLED,
"steps": [
{
"kind": TestQueryPlanEntryStep.KIND,
"substeps": TestQueryPlanEntryStep.SUBSTEPS,
}
],
}
klass = self._get_target_class()
entry = klass.from_api_repr(resource)
self.assertEqual(entry.name, self.NAME)
self.assertEqual(entry.entry_id, self.ENTRY_ID)
self.assertEqual(entry.wait_ratio_avg, self.WAIT_RATIO_AVG)
self.assertEqual(entry.wait_ratio_max, self.WAIT_RATIO_MAX)
self.assertEqual(entry.read_ratio_avg, self.READ_RATIO_AVG)
self.assertEqual(entry.read_ratio_max, self.READ_RATIO_MAX)
self.assertEqual(entry.compute_ratio_avg, self.COMPUTE_RATIO_AVG)
self.assertEqual(entry.compute_ratio_max, self.COMPUTE_RATIO_MAX)
self.assertEqual(entry.write_ratio_avg, self.WRITE_RATIO_AVG)
self.assertEqual(entry.write_ratio_max, self.WRITE_RATIO_MAX)
self.assertEqual(entry.records_read, self.RECORDS_READ)
self.assertEqual(entry.records_written, self.RECORDS_WRITTEN)
self.assertEqual(entry.status, self.STATUS)
self.assertEqual(entry.steps, steps)
def test_start(self):
from google.cloud._helpers import _RFC3339_MICROS
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertEqual(entry.start, None)
entry._properties["startMs"] = self.START_MS
self.assertEqual(
entry.start.strftime(_RFC3339_MICROS), self.START_RFC3339_MICROS
)
def test_end(self):
from google.cloud._helpers import _RFC3339_MICROS
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertEqual(entry.end, None)
entry._properties["endMs"] = self.END_MS
self.assertEqual(entry.end.strftime(_RFC3339_MICROS), self.END_RFC3339_MICROS)
class TestScriptStackFrame(unittest.TestCase, _Base):
def _make_one(self, resource):
from google.cloud.bigquery.job import ScriptStackFrame
return ScriptStackFrame(resource)
def test_procedure_id(self):
frame = self._make_one({"procedureId": "some-procedure"})
self.assertEqual(frame.procedure_id, "some-procedure")
del frame._properties["procedureId"]
self.assertIsNone(frame.procedure_id)
def test_start_line(self):
frame = self._make_one({"startLine": 5})
self.assertEqual(frame.start_line, 5)
frame._properties["startLine"] = "5"
self.assertEqual(frame.start_line, 5)
def test_start_column(self):
frame = self._make_one({"startColumn": 29})
self.assertEqual(frame.start_column, 29)
frame._properties["startColumn"] = "29"
self.assertEqual(frame.start_column, 29)
def test_end_line(self):
frame = self._make_one({"endLine": 9})
self.assertEqual(frame.end_line, 9)
frame._properties["endLine"] = "9"
self.assertEqual(frame.end_line, 9)
def test_end_column(self):
frame = self._make_one({"endColumn": 14})
self.assertEqual(frame.end_column, 14)
frame._properties["endColumn"] = "14"
self.assertEqual(frame.end_column, 14)
def test_text(self):
frame = self._make_one({"text": "QUERY TEXT"})
self.assertEqual(frame.text, "QUERY TEXT")
class TestScriptStatistics(unittest.TestCase, _Base):
def _make_one(self, resource):
from google.cloud.bigquery.job import ScriptStatistics
return ScriptStatistics(resource)
    def test_evaluation_kind(self):
stats = self._make_one({"evaluationKind": "EXPRESSION"})
self.assertEqual(stats.evaluation_kind, "EXPRESSION")
self.assertEqual(stats.stack_frames, [])
def test_stack_frames(self):
stats = self._make_one(
{
"stackFrames": [
{
"procedureId": "some-procedure",
"startLine": 5,
"startColumn": 29,
"endLine": 9,
"endColumn": 14,
"text": "QUERY TEXT",
},
{},
]
}
)
stack_frames = stats.stack_frames
self.assertEqual(len(stack_frames), 2)
stack_frame = stack_frames[0]
self.assertEqual(stack_frame.procedure_id, "some-procedure")
self.assertEqual(stack_frame.start_line, 5)
self.assertEqual(stack_frame.start_column, 29)
self.assertEqual(stack_frame.end_line, 9)
self.assertEqual(stack_frame.end_column, 14)
self.assertEqual(stack_frame.text, "QUERY TEXT")
stack_frame = stack_frames[1]
self.assertIsNone(stack_frame.procedure_id)
self.assertIsNone(stack_frame.start_line)
self.assertIsNone(stack_frame.start_column)
self.assertIsNone(stack_frame.end_line)
self.assertIsNone(stack_frame.end_column)
self.assertIsNone(stack_frame.text)
class TestTimelineEntry(unittest.TestCase, _Base):
ELAPSED_MS = 101
ACTIVE_UNITS = 50
PENDING_UNITS = 98
COMPLETED_UNITS = 520
SLOT_MILLIS = 12029
@staticmethod
def _get_target_class():
from google.cloud.bigquery.job import TimelineEntry
return TimelineEntry
def test_from_api_repr_empty(self):
klass = self._get_target_class()
entry = klass.from_api_repr({})
self.assertIsNone(entry.elapsed_ms)
self.assertIsNone(entry.active_units)
self.assertIsNone(entry.pending_units)
self.assertIsNone(entry.completed_units)
self.assertIsNone(entry.slot_millis)
def test_from_api_repr_normal(self):
resource = {
"elapsedMs": self.ELAPSED_MS,
"activeUnits": self.ACTIVE_UNITS,
"pendingUnits": self.PENDING_UNITS,
"completedUnits": self.COMPLETED_UNITS,
"totalSlotMs": self.SLOT_MILLIS,
}
klass = self._get_target_class()
entry = klass.from_api_repr(resource)
self.assertEqual(entry.elapsed_ms, self.ELAPSED_MS)
self.assertEqual(entry.active_units, self.ACTIVE_UNITS)
self.assertEqual(entry.pending_units, self.PENDING_UNITS)
self.assertEqual(entry.completed_units, self.COMPLETED_UNITS)
self.assertEqual(entry.slot_millis, self.SLOT_MILLIS)
@pytest.mark.parametrize(
"query,expected",
(
(None, False),
("", False),
("select name, age from table", False),
("select name, age from table LIMIT 10;", False),
("select name, age from table order by other_column;", True),
("Select name, age From table Order By other_column", True),
("SELECT name, age FROM table ORDER BY other_column;", True),
("select name, age from table order\nby other_column", True),
("Select name, age From table Order\nBy other_column;", True),
("SELECT name, age FROM table ORDER\nBY other_column", True),
("SelecT name, age froM table OrdeR \n\t BY other_column;", True),
),
)
def test__contains_order_by(query, expected):
from google.cloud.bigquery import job as mut
if expected:
assert mut._contains_order_by(query)
else:
assert not mut._contains_order_by(query)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
@pytest.mark.skipif(
bigquery_storage_v1beta1 is None, reason="Requires `google-cloud-bigquery-storage`"
)
@pytest.mark.parametrize(
"query",
(
"select name, age from table order by other_column;",
"Select name, age From table Order By other_column;",
"SELECT name, age FROM table ORDER BY other_column;",
"select name, age from table order\nby other_column;",
"Select name, age From table Order\nBy other_column;",
"SELECT name, age FROM table ORDER\nBY other_column;",
"SelecT name, age froM table OrdeR \n\t BY other_column;",
),
)
def test_to_dataframe_bqstorage_preserve_order(query):
from google.cloud.bigquery.job import QueryJob as target_class
job_resource = _make_job_resource(
project_id="test-project", job_type="query", ended=True
)
job_resource["configuration"]["query"]["query"] = query
job_resource["status"] = {"state": "DONE"}
get_query_results_resource = {
"jobComplete": True,
"jobReference": {"projectId": "test-project", "jobId": "test-job"},
"schema": {
"fields": [
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "age", "type": "INTEGER", "mode": "NULLABLE"},
]
},
"totalRows": "4",
}
connection = _make_connection(get_query_results_resource, job_resource)
client = _make_client(connection=connection)
job = target_class.from_api_repr(job_resource, client)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession()
session.avro_schema.schema = json.dumps(
{
"type": "record",
"name": "__root__",
"fields": [
{"name": "name", "type": ["null", "string"]},
{"name": "age", "type": ["null", "long"]},
],
}
)
bqstorage_client.create_read_session.return_value = session
job.to_dataframe(bqstorage_client=bqstorage_client)
bqstorage_client.create_read_session.assert_called_once_with(
mock.ANY,
"projects/test-project",
format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
read_options=mock.ANY,
# Use a single stream to preserve row order.
requested_streams=1,
)
| apache-2.0 |
wlamond/scikit-learn | examples/manifold/plot_manifold_sphere.py | 89 | 5055 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <https://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
ax.view_init(40, -10)
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/event_handling/trifinder_event_demo.py | 7 | 1771 | """
Example showing the use of a TriFinder object. As the mouse is moved over the
triangulation, the triangle under the cursor is highlighted and the index of
the triangle is displayed in the plot title.
"""
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation
from matplotlib.patches import Polygon
import numpy as np
import math
def update_polygon(tri):
if tri == -1:
points = [0, 0, 0]
else:
points = triangulation.triangles[tri]
xs = triangulation.x[points]
ys = triangulation.y[points]
    # set_xy expects an (N, 2) sequence of vertices; build it explicitly so
    # the call also works on Python 3, where zip() returns an iterator.
    polygon.set_xy(np.column_stack([xs, ys]))
def motion_notify(event):
if event.inaxes is None:
tri = -1
else:
tri = trifinder(event.xdata, event.ydata)
update_polygon(tri)
plt.title('In triangle %i' % tri)
event.canvas.draw()
# Create a Triangulation.
n_angles = 16
n_radii = 5
min_radius = 0.25
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += math.pi / n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
triangulation = Triangulation(x, y)
xmid = x[triangulation.triangles].mean(axis=1)
ymid = y[triangulation.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triangulation.set_mask(mask)
# Use the triangulation's default TriFinder object.
trifinder = triangulation.get_trifinder()
# Setup plot and callbacks.
plt.subplot(111, aspect='equal')
plt.triplot(triangulation, 'bo-')
polygon = Polygon([[0, 0], [0, 0]], facecolor='y') # dummy data for xs,ys
update_polygon(-1)
plt.gca().add_patch(polygon)
plt.gcf().canvas.mpl_connect('motion_notify_event', motion_notify)
plt.show()
| unlicense |
raghavrv/scikit-learn | sklearn/cluster/spectral.py | 11 | 19173 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
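    Examples
    --------
    A minimal, illustrative sketch: the random embedding below is an
    arbitrary toy array, used only to show the expected
    ``(n_samples, n_clusters)`` input and the shape of the returned labels.
    >>> import numpy as np
    >>> from sklearn.cluster.spectral import discretize
    >>> rng = np.random.RandomState(0)
    >>> embedding = rng.rand(12, 3)
    >>> labels = discretize(embedding, random_state=rng)
    >>> labels.shape
    (12,)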
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
    # If there is an exception we try to randomize and rerun SVD again;
    # we do this at most max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
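# A minimal usage sketch (illustrative only; the data and helper below are
# assumptions, not part of this module). As the docstring notes, the affinity
# can be the heat kernel of a pairwise distance matrix:
#
#   import numpy as np
#   from sklearn.metrics import pairwise_distances
#
#   X = np.random.RandomState(0).rand(30, 2)
#   dist = pairwise_distances(X)
#   delta = dist.mean()                      # free bandwidth parameter
#   affinity = np.exp(-dist ** 2 / (2. * delta ** 2))
#   labels = spectral_clustering(affinity, n_clusters=3, random_state=0)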
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles in the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
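# A minimal usage sketch for the estimator above (illustrative only; the data
# below is an assumption):
#
#   import numpy as np
#
#   X = np.random.RandomState(0).rand(30, 2)
#   model = SpectralClustering(n_clusters=3, affinity='rbf', gamma=1.0,
#                              random_state=0)
#   labels = model.fit(X).labels_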
| bsd-3-clause |
rxl194/18-327-wavelets-filter-banks | tools/kompresni.py | 2 | 8583 | ## kompresni.py
## This is an example of using Wavelets to decompose an image
## using Python libraries numpy, scipy, PyWavelets
##
##
## main reference that inspired me was this webpage:
## http://kastnerkyle.github.io/blog/2014/04/17/wavelets/
##
##
#####################################################################################
## Copyleft 2015, Ernest Yeung <ernestyalumni@gmail.com>
##
## 20150704
##
## This program, along with all its code, is free software;
## you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## If you like what I'm doing and would like to help and contribute support,
## please take a look at my crowdfunding campaign at ernestyalumni.tilt.com
## read my mission statement and give your financial support,
## no matter how small or large,
## if you can
## and to keep checking my ernestyalumni.wordpress.com blog and
## various social media channels
## for updates as I try to keep putting out great stuff.
##
## Fund Science! Help my physics education outreach and research efforts at
## Open/Tilt ernestyalumni.tilt.com - Ernest Yeung
##
## ernestyalumni.tilt.com
##
## Facebook : ernestyalumni
## gmail : ernestyalumni
## google : ernestyalumni
## linkedin : ernestyalumni
## Tilt/Open : ernestyalumni
## tumblr : ernestyalumni
## twitter : ernestyalumni
## youtube : ernestyalumni
## wordpress : ernestyalumni
##
##
################################################################################
##
# EY : 20150704 Install a PIL (Python Image Library). I installed Pillow with pip install pillow
import numpy as np
import scipy
from scipy import misc, ndimage
import pywt
import matplotlib.pyplot as plt
filename = "3-IS61836062.jpg" # EY : 20150704 obviously, you can use your own image or the built-in lena
simona = ndimage.imread(filename)
# let's get only the R,G,B values
simonaRGB = [simona[:,:,k] for k in range(3)] # cf. http://stackoverflow.com/questions/2725750/slicing-arrays-in-numpy-scipy
simona_dwt2_db4 = [pywt.dwt2(color, 'db4') for color in simonaRGB]
simona_dwt2_db4_cA = np.array([simona_dwt2_db4[0][0],simona_dwt2_db4[1][0],simona_dwt2_db4[2][0]])
simona_dwt2_db4_cA = simona_dwt2_db4_cA.reshape(903,602,3)
simona_dwt2_db4_cH = np.array([simona_dwt2_db4[0][1][0],simona_dwt2_db4[1][1][0],simona_dwt2_db4[2][1][0]])
simona_dwt2_db4_cH = simona_dwt2_db4_cH.reshape(903,602,3)
simona_dwt2_db4_cV = np.array([simona_dwt2_db4[0][1][1],simona_dwt2_db4[1][1][1],simona_dwt2_db4[2][1][1]])
simona_dwt2_db4_cV = simona_dwt2_db4_cV.reshape(903,602,3)
simona_dwt2_db4_cD = np.array([simona_dwt2_db4[0][1][2],simona_dwt2_db4[1][1][2],simona_dwt2_db4[2][1][2]])
simona_dwt2_db4_cD = simona_dwt2_db4_cD.reshape(903,602,3)
simona_idwt2_db4 = [pywt.idwt2(simona_dwt2_db4[0],'db4'),pywt.idwt2(simona_dwt2_db4[1],'db4'),pywt.idwt2(simona_dwt2_db4[2],'db4')]
simona_idwt2_db4 = np.array(simona_idwt2_db4)
simona_idwt2_db4 = simona_idwt2_db4.reshape(1800,1198,3)
def colorwavedec2(data,res=5,wavelet='db4'):
return [pywt.wavedec2(color,wavelet,level=res) for color in data]
def colorwaverec2(colorcoeffs, wavelet='db4'):
N = len(colorcoeffs)
img = np.array([pywt.waverec2(colorcoeff, wavelet) for colorcoeff in colorcoeffs])
m,n = img[0].shape
img = img.reshape(m,n,N)
return img
simona_wavedec2_db4 = colorwavedec2( simonaRGB )
simona_waverec2_db4 = colorwaverec2( simona_wavedec2_db4)
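# A quick sanity check of the multi-level round trip (a sketch, assuming the
# arrays defined above; the reconstruction can be a few pixels larger than the
# original because of boundary padding, so only the overlapping region is
# compared):
#
#   rec_red = pywt.waverec2(simona_wavedec2_db4[0], 'db4')
#   m = min(rec_red.shape[0], simonaRGB[0].shape[0])
#   n = min(rec_red.shape[1], simonaRGB[0].shape[1])
#   max_err = np.abs(rec_red[:m, :n] - simonaRGB[0][:m, :n].astype(float)).max()
#   print("max reconstruction error (red channel):", max_err)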
plt.figure(1)
plt.subplot(4,1,1)
plt.imshow( simona_dwt2_db4[0][0] )
#plt.title("cA approximation detail coefficients of Daubechies 4 single level discrete wavelet transform of Red (cerveny)")
plt.subplot(4,1,2)
plt.imshow( simona_dwt2_db4[0][1][0] )
#plt.title("cH horizontal detail coefficients of Daubechies 4 single level discrete wavelet transformor Red (cerveny)")
plt.subplot(4,1,3)
plt.imshow( simona_dwt2_db4[0][1][1] )
#plt.title("cV vertical detail coefficients of Daubechies 4 single level discrete wavelet transform of Red (cerveny)")
plt.subplot(4,1,4)
plt.imshow( simona_dwt2_db4[0][1][2] )
#plt.title("cD diagonal detail coefficients of Daubechies 4 single level discrete wavelet transform of Red (cerveny)")
plt.figure(2)
plt.subplot(4,1,1)
plt.imshow( simona_dwt2_db4[1][0] )
plt.title("cA approximation detail coefficients of Daubechies 4 single level discrete wavelet transform of Green (zeleny)")
plt.subplot(4,1,2)
plt.imshow( simona_dwt2_db4[1][1][0] )
plt.title("cH horizontal detail coefficients of Daubechies 4 single level discrete wavelet transformor Green (zeleny)")
plt.subplot(4,1,3)
plt.imshow( simona_dwt2_db4[1][1][1] )
plt.title("cV vertical detail coefficients of Daubechies 4 single level discrete wavelet transform of Green (zeleny)")
plt.subplot(4,1,4)
plt.imshow( simona_dwt2_db4[1][1][2] )
plt.title("cD diagonal detail coefficients of Daubechies 4 single level discrete wavelet transform of Green (zeleny)")
plt.figure(3)
plt.subplot(4,1,1)
plt.imshow( simona_dwt2_db4[2][0] )
plt.title("cA approximation detail coefficients of Daubechies 4 single level discrete wavelet transform of Blue (modry)")
plt.subplot(4,1,2)
plt.imshow( simona_dwt2_db4[2][1][0] )
plt.title("cH horizontal detail coefficients of Daubechies 4 single level discrete wavelet transformor Blue (modry)")
plt.subplot(4,1,3)
plt.imshow( simona_dwt2_db4[2][1][1] )
plt.title("cV vertical detail coefficients of Daubechies 4 single level discrete wavelet transform of Blue (modry)")
plt.subplot(4,1,4)
plt.imshow( simona_dwt2_db4[2][1][2] )
plt.title("cD diagonal detail coefficients of Daubechies 4 single level discrete wavelet transform of Red (cerveny)")
plt.figure(4)
plt.imshow( simona_dwt2_db4_cA)
plt.title("cA approx coeff of Daub 4 single level DWT")
plt.figure(5)
plt.imshow( simona_dwt2_db4_cH)
plt.title("cH hori coeff of Daub 4 single level DWT")
plt.figure(6)
plt.imshow( simona_dwt2_db4_cV)
plt.title("cV vert coeff of Daub 4 single level DWT")
plt.figure(7)
plt.imshow( simona_dwt2_db4_cD)
plt.title("cD diag coeff of Daub 4 single level DWT")
plt.figure(8)
plt.imshow(simona_idwt2_db4)
plt.title("IDWT using Daub 4 on single level DWT using Daub 4")
plt.figure(9)
plt.imshow(simona_waverec2_db4)
plt.title("5 level reconstruction using Daub 4")
| mit |
GuessWhoSamFoo/pandas | pandas/tests/util/test_validate_kwargs.py | 2 | 2040 | # -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = OrderedDict()
compat_args[good_arg] = "foo"
compat_args[bad_arg + "o"] = "bar"
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(fname=_fname, arg=bad_arg))
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=_fname))
compat_args = OrderedDict()
compat_args["foo"] = 1
compat_args["bar"] = "s"
compat_args["baz"] = None
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = OrderedDict()
compat_args["f"] = None
compat_args["b"] = 1
compat_args["ba"] = "s"
kwargs = dict(f=None, b=1)
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = ("For argument \"%s\" expected type bool, received type %s" %
(name, type(value).__name__))
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/graphics/dotplots.py | 31 | 18190 | import numpy as np
from statsmodels.compat import range
from . import utils
def dot_plot(points, intervals=None, lines=None, sections=None,
styles=None, marker_props=None, line_props=None,
split_names=None, section_order=None, line_order=None,
stacked=False, styles_order=None, striped=False,
horizontal=True, show_names="both",
fmt_left_name=None, fmt_right_name=None,
show_section_titles=None, ax=None):
"""
Produce a dotplot similar in style to those in Cleveland's
"Visualizing Data" book. These are also known as "forest plots".
Parameters
----------
points : array_like
The quantitative values to be plotted as markers.
intervals : array_like
The intervals to be plotted around the points. The elements
of `intervals` are either scalars or sequences of length 2. A
scalar indicates the half width of a symmetric interval. A
sequence of length 2 contains the left and right half-widths
(respectively) of a nonsymmetric interval. If None, no
intervals are drawn.
lines : array_like
A grouping variable indicating which points/intervals are
drawn on a common line. If None, each point/interval appears
on its own line.
sections : array_like
A grouping variable indicating which lines are grouped into
sections. If None, everything is drawn in a single section.
styles : array_like
A grouping label defining the plotting style of the markers
and intervals.
marker_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting markers. Useful keyword
arguments are "color", "marker", and "ms" (marker size).
line_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting interval lines. Useful
keyword arguments are "color", "linestyle", "solid_capstyle",
and "linewidth".
split_names : string
If not None, this is used to split the values of `lines` into
substrings that are drawn in the left and right margins,
respectively. If None, the values of `lines` are drawn in the
left margin.
section_order : array_like
The section labels in the order in which they appear in the
dotplot.
line_order : array_like
The line labels in the order in which they appear in the
dotplot.
stacked : boolean
If True, when multiple points or intervals are drawn on the
same line, they are offset from each other.
styles_order : array_like
If stacked=True, this is the order in which the point styles
on a given line are drawn from top to bottom (if horizontal
        is True) or from left to right (if horizontal is False). If
None (default), the order is lexical.
striped : boolean
If True, every other line is enclosed in a shaded box.
horizontal : boolean
If True (default), the lines are drawn horizontally, otherwise
they are drawn vertically.
show_names : string
        Determines whether labels (names) are shown in the left and/or
        right margins (top/bottom margins if `horizontal` is False).
        If `both`, labels are drawn in both margins, if `left`, labels
        are drawn in the left or bottom margin. If `right`, labels are
        drawn in the right or top margin.
fmt_left_name : function
The left/top margin names are passed through this function
before drawing on the plot.
fmt_right_name : function
        The right/bottom margin names are passed through this function
before drawing on the plot.
show_section_titles : bool or None
If None, section titles are drawn only if there is more than
one section. If False/True, section titles are never/always
drawn, respectively.
ax : matplotlib.axes
The axes on which the dotplot is drawn. If None, a new axes
is created.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Notes
-----
`points`, `intervals`, `lines`, `sections`, `styles` must all have
the same length whenever present.
Examples
--------
This is a simple dotplot with one point per line:
>>> dot_plot(points=point_values)
This dotplot has labels on the lines (if elements in
`label_values` are repeated, the corresponding points appear on
the same line):
>>> dot_plot(points=point_values, lines=label_values)
References
----------
* Cleveland, William S. (1993). "Visualizing Data". Hobart
Press.
* Jacoby, William G. (2006) "The Dot Plot: A Graphical Display
for Labeled Quantitative Values." The Political Methodologist
14(1): 6-14.
"""
import matplotlib.transforms as transforms
fig, ax = utils.create_mpl_ax(ax)
# Convert to numpy arrays if that is not what we are given.
points = np.asarray(points)
asarray_or_none = lambda x : None if x is None else np.asarray(x)
intervals = asarray_or_none(intervals)
lines = asarray_or_none(lines)
sections = asarray_or_none(sections)
styles = asarray_or_none(styles)
# Total number of points
npoint = len(points)
# Set default line values if needed
if lines is None:
lines = np.arange(npoint)
# Set default section values if needed
if sections is None:
sections = np.zeros(npoint)
# Set default style values if needed
if styles is None:
styles = np.zeros(npoint)
# The vertical space (in inches) for a section title
section_title_space = 0.5
# The number of sections
nsect = len(set(sections))
if section_order is not None:
nsect = len(set(section_order))
# The number of section titles
if show_section_titles == False:
draw_section_titles = False
nsect_title = 0
elif show_section_titles == True:
draw_section_titles = True
nsect_title = nsect
else:
draw_section_titles = nsect > 1
nsect_title = nsect if nsect > 1 else 0
# The total vertical space devoted to section titles.
section_space_total = section_title_space * nsect_title
# Add a bit of room so that points that fall at the axis limits
# are not cut in half.
ax.set_xmargin(0.02)
ax.set_ymargin(0.02)
if section_order is None:
lines0 = list(set(sections))
lines0.sort()
else:
lines0 = section_order
if line_order is None:
lines1 = list(set(lines))
lines1.sort()
else:
lines1 = line_order
# A map from (section,line) codes to index positions.
lines_map = {}
for i in range(npoint):
if section_order is not None and sections[i] not in section_order:
continue
if line_order is not None and lines[i] not in line_order:
continue
ky = (sections[i], lines[i])
if ky not in lines_map:
lines_map[ky] = []
lines_map[ky].append(i)
# Get the size of the axes on the parent figure in inches
bbox = ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted())
awidth, aheight = bbox.width, bbox.height
# The number of lines in the plot.
nrows = len(lines_map)
# The positions of the lowest and highest guideline in axes
# coordinates (for horizontal dotplots), or the leftmost and
# rightmost guidelines (for vertical dotplots).
bottom, top = 0, 1
if horizontal:
# x coordinate is data, y coordinate is axes
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
else:
# x coordinate is axes, y coordinate is data
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
# Space used for a section title, in axes coordinates
title_space_axes = section_title_space / aheight
# Space between lines
if horizontal:
dpos = (top - bottom - nsect_title*title_space_axes) /\
float(nrows)
else:
dpos = (top - bottom) / float(nrows)
# Determine the spacing for stacked points
if styles_order is not None:
style_codes = styles_order
else:
style_codes = list(set(styles))
style_codes.sort()
# Order is top to bottom for horizontal plots, so need to
# flip.
if horizontal:
style_codes = style_codes[::-1]
# nval is the maximum number of points on one line.
nval = len(style_codes)
if nval > 1:
stackd = dpos / (2.5*(float(nval)-1))
else:
stackd = 0.
# Map from style code to its integer position
#style_codes_map = {x: style_codes.index(x) for x in style_codes}
# python 2.6 compat version:
style_codes_map = dict((x, style_codes.index(x)) for x in style_codes)
# Setup default marker styles
colors = ["r", "g", "b", "y", "k", "purple", "orange"]
if marker_props is None:
#marker_props = {x: {} for x in style_codes}
# python 2.6 compat version:
marker_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in marker_props[sc]:
marker_props[sc]["color"] = colors[j % len(colors)]
if "marker" not in marker_props[sc]:
marker_props[sc]["marker"] = "o"
if "ms" not in marker_props[sc]:
marker_props[sc]["ms"] = 10 if stackd == 0 else 6
# Setup default line styles
if line_props is None:
#line_props = {x: {} for x in style_codes}
# python 2.6 compat version:
line_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in line_props[sc]:
line_props[sc]["color"] = "grey"
if "linewidth" not in line_props[sc]:
line_props[sc]["linewidth"] = 2 if stackd > 0 else 8
if horizontal:
# The vertical position of the first line.
pos = top - dpos/2 if nsect == 1 else top
else:
# The horizontal position of the first line.
pos = bottom + dpos/2
# Points that have already been labeled
labeled = set()
# Positions of the y axis grid lines
ticks = []
# Loop through the sections
for k0 in lines0:
# Draw a section title
if draw_section_titles:
if horizontal:
y0 = pos + dpos/2 if k0 == lines0[0] else pos
ax.fill_between((0, 1), (y0,y0),
(pos-0.7*title_space_axes,
pos-0.7*title_space_axes),
color='darkgrey',
transform=ax.transAxes,
zorder=1)
txt = ax.text(0.5, pos - 0.35*title_space_axes, k0,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
txt.set_fontweight("bold")
pos -= title_space_axes
else:
m = len([k for k in lines_map if k[0] == k0])
ax.fill_between((pos-dpos/2+0.01,
pos+(m-1)*dpos+dpos/2-0.01),
(1.01,1.01), (1.06,1.06),
color='darkgrey',
transform=ax.transAxes,
zorder=1, clip_on=False)
txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
txt.set_fontweight("bold")
jrow = 0
for k1 in lines1:
# No data to plot
if (k0, k1) not in lines_map:
continue
# Draw the guideline
if horizontal:
ax.axhline(pos, color='grey')
else:
ax.axvline(pos, color='grey')
# Set up the labels
if split_names is not None:
us = k1.split(split_names)
if len(us) >= 2:
left_label, right_label = us[0], us[1]
else:
left_label, right_label = k1, None
else:
left_label, right_label = k1, None
if fmt_left_name is not None:
left_label = fmt_left_name(left_label)
if fmt_right_name is not None:
right_label = fmt_right_name(right_label)
# Draw the stripe
if striped and jrow % 2 == 0:
if horizontal:
ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2),
(pos+dpos/2, pos+dpos/2),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
else:
ax.fill_between((pos-dpos/2, pos+dpos/2),
(0, 0), (1, 1),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
jrow += 1
# Draw the left margin label
if show_names.lower() in ("left", "both"):
if horizontal:
ax.text(-0.1/awidth, pos, left_label,
horizontalalignment="right",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, -0.1/aheight, left_label,
horizontalalignment="center",
verticalalignment='top',
transform=ax.transAxes,
family='monospace')
# Draw the right margin label
if show_names.lower() in ("right", "both"):
if right_label is not None:
if horizontal:
ax.text(1 + 0.1/awidth, pos, right_label,
horizontalalignment="left",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, 1 + 0.1/aheight, right_label,
horizontalalignment="center",
verticalalignment='bottom',
transform=ax.transAxes,
family='monospace')
# Save the vertical position so that we can place the
# tick marks
ticks.append(pos)
# Loop over the points in one line
for ji,jp in enumerate(lines_map[(k0,k1)]):
# Calculate the vertical offset
yo = 0
if stacked:
yo = -dpos/5 + style_codes_map[styles[jp]]*stackd
pt = points[jp]
# Plot the interval
if intervals is not None:
# Symmetric interval
if np.isscalar(intervals[jp]):
lcb, ucb = pt - intervals[jp],\
pt + intervals[jp]
# Nonsymmetric interval
else:
lcb, ucb = pt - intervals[jp][0],\
pt + intervals[jp][1]
# Draw the interval
if horizontal:
ax.plot([lcb, ucb], [pos+yo, pos+yo], '-',
transform=trans,
**line_props[styles[jp]])
else:
ax.plot([pos+yo, pos+yo], [lcb, ucb], '-',
transform=trans,
**line_props[styles[jp]])
# Plot the point
sl = styles[jp]
sll = sl if sl not in labeled else None
labeled.add(sl)
if horizontal:
ax.plot([pt,], [pos+yo,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
else:
ax.plot([pos+yo,], [pt,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
if horizontal:
pos -= dpos
else:
pos += dpos
# Set up the axis
if horizontal:
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("none")
ax.set_yticklabels([])
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.1/aheight))
ax.set_ylim(0, 1)
ax.yaxis.set_ticks(ticks)
ax.autoscale_view(scaley=False, tight=True)
else:
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("none")
ax.set_xticklabels([])
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('axes', -0.1/awidth))
ax.set_xlim(0, 1)
ax.xaxis.set_ticks(ticks)
ax.autoscale_view(scalex=False, tight=True)
return fig
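# A minimal usage sketch (illustrative only; the values and labels below are
# assumptions): two labeled lines, two point styles per line, with symmetric
# intervals.
#
#   import numpy as np
#
#   points = np.array([1.0, 1.5, 2.0, 2.5])
#   intervals = np.array([0.2, 0.3, 0.2, 0.4])
#   lines = ["A", "A", "B", "B"]
#   styles = ["old", "new", "old", "new"]
#   fig = dot_plot(points, intervals=intervals, lines=lines,
#                  styles=styles, stacked=True)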
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/plot/core.py | 11 | 41921 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import pandas as pd
import numpy as np
from pyspark.ml.feature import Bucketizer
from pyspark.mllib.stat import KernelDensity # type: ignore
from pyspark.sql import functions as F
from pandas.core.base import PandasObject
from pandas.core.dtypes.inference import is_integer
from pyspark.pandas.missing import unsupported_function
from pyspark.pandas.config import get_option
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import name_like_string
class TopNPlotBase:
def get_top_n(self, data):
from pyspark.pandas import DataFrame, Series
max_rows = get_option("plotting.max_rows")
# Simply use the first 1k elements and make it into a pandas dataframe
# For categorical variables, it is likely called from df.x.value_counts().plot.xxx().
if isinstance(data, (Series, DataFrame)):
data = data.head(max_rows + 1).to_pandas()
else:
raise TypeError("Only DataFrame and Series are supported for plotting.")
self.partial = False
if len(data) > max_rows:
self.partial = True
data = data.iloc[:max_rows]
return data
def set_result_text(self, ax):
max_rows = get_option("plotting.max_rows")
assert hasattr(self, "partial")
if self.partial:
ax.text(
1,
1,
"showing top {} elements only".format(max_rows),
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class SampledPlotBase:
def get_sampled(self, data):
from pyspark.pandas import DataFrame, Series
fraction = get_option("plotting.sample_ratio")
if fraction is None:
fraction = 1 / (len(data) / get_option("plotting.max_rows"))
fraction = min(1.0, fraction)
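        # For example, assuming plotting.max_rows is 1000 and the input has
        # 100,000 rows, fraction = 1 / (100000 / 1000) = 0.01, so roughly one
        # percent of the rows is sampled for plotting.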
self.fraction = fraction
if isinstance(data, (DataFrame, Series)):
if isinstance(data, Series):
data = data.to_frame()
sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction)
return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas()
else:
raise TypeError("Only DataFrame and Series are supported for plotting.")
def set_result_text(self, ax):
assert hasattr(self, "fraction")
if self.fraction < 1:
ax.text(
1,
1,
"showing the sampled result by fraction %s" % self.fraction,
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
class HistogramPlotBase:
@staticmethod
def prepare_hist_data(data, bins):
# TODO: this logic is similar with KdePlotBase. Might have to deduplicate it.
from pyspark.pandas.series import Series
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
if is_integer(bins):
# computes boundaries for the column
bins = HistogramPlotBase.get_bins(data.to_spark(), bins)
return numeric_data, bins
@staticmethod
def get_bins(sdf, bins):
# 'data' is a Spark DataFrame that selects all columns.
if len(sdf.columns) > 1:
min_col = F.least(*map(F.min, sdf))
max_col = F.greatest(*map(F.max, sdf))
else:
min_col = F.min(sdf.columns[-1])
max_col = F.max(sdf.columns[-1])
boundaries = sdf.select(min_col, max_col).first()
# divides the boundaries into bins
if boundaries[0] == boundaries[1]:
boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5)
return np.linspace(boundaries[0], boundaries[1], bins + 1)
@staticmethod
def compute_hist(psdf, bins):
# 'data' is a Spark DataFrame that selects one column.
assert isinstance(bins, (np.ndarray, np.generic))
sdf = psdf._internal.spark_frame
scols = []
input_column_names = []
for label in psdf._internal.column_labels:
input_column_name = name_like_string(label)
input_column_names.append(input_column_name)
scols.append(psdf._internal.spark_column_for(label).alias(input_column_name))
sdf = sdf.select(*scols)
# 1. Make the bucket output flat to:
# +----------+-------+
# |__group_id|buckets|
# +----------+-------+
# |0 |0.0 |
# |0 |0.0 |
# |0 |1.0 |
# |0 |2.0 |
# |0 |3.0 |
# |0 |3.0 |
# |1 |0.0 |
# |1 |1.0 |
# |1 |1.0 |
# |1 |2.0 |
# |1 |1.0 |
# |1 |0.0 |
# +----------+-------+
colnames = sdf.columns
bucket_names = ["__{}_bucket".format(colname) for colname in colnames]
output_df = None
for group_id, (colname, bucket_name) in enumerate(zip(colnames, bucket_names)):
# creates a Bucketizer to get corresponding bin of each value
bucketizer = Bucketizer(
splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip"
)
bucket_df = bucketizer.transform(sdf)
if output_df is None:
output_df = bucket_df.select(
SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket")
)
else:
output_df = output_df.union(
bucket_df.select(
SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket")
)
)
# 2. Calculate the count based on each group and bucket.
# +----------+-------+------+
# |__group_id|buckets| count|
# +----------+-------+------+
# |0 |0.0 |2 |
# |0 |1.0 |1 |
# |0 |2.0 |1 |
# |0 |3.0 |2 |
# |1 |0.0 |2 |
# |1 |1.0 |3 |
# |1 |2.0 |1 |
# +----------+-------+------+
result = (
output_df.groupby("__group_id", "__bucket")
.agg(F.count("*").alias("count"))
.toPandas()
.sort_values(by=["__group_id", "__bucket"])
)
# 3. Fill empty bins and calculate based on each group id. From:
# +----------+--------+------+
# |__group_id|__bucket| count|
# +----------+--------+------+
# |0 |0.0 |2 |
# |0 |1.0 |1 |
# |0 |2.0 |1 |
# |0 |3.0 |2 |
# +----------+--------+------+
# +----------+--------+------+
# |__group_id|__bucket| count|
# +----------+--------+------+
# |1 |0.0 |2 |
# |1 |1.0 |3 |
# |1 |2.0 |1 |
# +----------+--------+------+
#
# to:
# +-----------------+
# |__values1__bucket|
# +-----------------+
# |2 |
# |1 |
# |1 |
# |2 |
# |0 |
# +-----------------+
# +-----------------+
# |__values2__bucket|
# +-----------------+
# |2 |
# |3 |
# |1 |
# |0 |
# |0 |
# +-----------------+
output_series = []
for i, (input_column_name, bucket_name) in enumerate(zip(input_column_names, bucket_names)):
current_bucket_result = result[result["__group_id"] == i]
# generates a pandas DF with one row for each bin
# we need this as some of the bins may be empty
indexes = pd.DataFrame({"__bucket": np.arange(0, len(bins) - 1)})
# merges the bins with counts on it and fills remaining ones with zeros
pdf = indexes.merge(current_bucket_result, how="left", on=["__bucket"]).fillna(0)[
["count"]
]
pdf.columns = [input_column_name]
output_series.append(pdf[input_column_name])
return output_series
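# A minimal usage sketch for the helpers above (illustrative only; assumes an
# active Spark session and that pandas-on-Spark is importable as `ps`):
#
#   import pyspark.pandas as ps
#
#   psdf = ps.DataFrame({"a": [1, 2, 2, 3, 5], "b": [1, 1, 2, 4, 9]})
#   data, bins = HistogramPlotBase.prepare_hist_data(psdf, 4)
#   counts_per_column = HistogramPlotBase.compute_hist(data, bins)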
class BoxPlotBase:
@staticmethod
def compute_stats(data, colname, whis, precision):
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
pdf = data._psdf._internal.resolved_copy.spark_frame.agg(
*[
F.expr(
"approx_percentile(`{}`, {}, {})".format(colname, q, int(1.0 / precision))
).alias("{}_{}%".format(colname, int(q * 100)))
for q in [0.25, 0.50, 0.75]
],
F.mean("`%s`" % colname).alias("{}_mean".format(colname)),
).toPandas()
# Computes IQR and Tukey's fences
iqr = "{}_iqr".format(colname)
p75 = "{}_75%".format(colname)
p25 = "{}_25%".format(colname)
pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25]
pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr]
pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr]
qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"]
col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]]
col_summ.columns = qnames
lfence, ufence = col_summ["lfence"], col_summ["ufence"]
stats = {
"mean": col_summ["mean"].values[0],
"med": col_summ["50%"].values[0],
"q1": col_summ["25%"].values[0],
"q3": col_summ["75%"].values[0],
}
return stats, (lfence.values[0], ufence.values[0])
@staticmethod
def outliers(data, colname, lfence, ufence):
# Builds expression to identify outliers
expression = F.col("`%s`" % colname).between(lfence, ufence)
# Creates a column to flag rows as outliers or not
return data._psdf._internal.resolved_copy.spark_frame.withColumn(
"__{}_outlier".format(colname), ~expression
)
@staticmethod
def calc_whiskers(colname, outliers):
# Computes min and max values of non-outliers - the whiskers
minmax = (
outliers.filter("not `__{}_outlier`".format(colname))
.agg(F.min("`%s`" % colname).alias("min"), F.max(colname).alias("max"))
.toPandas()
)
return minmax.iloc[0][["min", "max"]].values
@staticmethod
def get_fliers(colname, outliers, min_val):
# Filters only the outliers, should "showfliers" be True
fliers_df = outliers.filter("`__{}_outlier`".format(colname))
# If shows fliers, takes the top 1k with highest absolute values
# Here we normalize the values by subtracting the minimum value from
# each, and use absolute values.
order_col = F.abs(F.col("`{}`".format(colname)) - min_val.item())
fliers = (
fliers_df.select(F.col("`{}`".format(colname)))
.orderBy(order_col)
.limit(1001)
.toPandas()[colname]
.values
)
return fliers
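# A sketch of how the helpers above combine for one numeric column named "a"
# (illustrative only; `psser` is an assumed pandas-on-Spark Series):
#
#   stats, (lfence, ufence) = BoxPlotBase.compute_stats(psser, "a",
#                                                       whis=1.5, precision=0.01)
#   outliers = BoxPlotBase.outliers(psser, "a", lfence, ufence)
#   whiskers = BoxPlotBase.calc_whiskers("a", outliers)
#   fliers = BoxPlotBase.get_fliers("a", outliers, whiskers[0])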
class KdePlotBase:
@staticmethod
def prepare_kde_data(data):
# TODO: this logic is similar with HistogramPlotBase. Might have to deduplicate it.
from pyspark.pandas.series import Series
if isinstance(data, Series):
data = data.to_frame()
numeric_data = data.select_dtypes(
include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64]
)
# no empty frames or series allowed
if len(numeric_data.columns) == 0:
raise TypeError(
"Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__)
)
return numeric_data
@staticmethod
def get_ind(sdf, ind):
def calc_min_max():
if len(sdf.columns) > 1:
min_col = F.least(*map(F.min, sdf))
max_col = F.greatest(*map(F.max, sdf))
else:
min_col = F.min(sdf.columns[-1])
max_col = F.max(sdf.columns[-1])
return sdf.select(min_col, max_col).first()
if ind is None:
min_val, max_val = calc_min_max()
sample_range = max_val - min_val
ind = np.linspace(
min_val - 0.5 * sample_range,
max_val + 0.5 * sample_range,
1000,
)
elif is_integer(ind):
min_val, max_val = calc_min_max()
sample_range = max_val - min_val
ind = np.linspace(
min_val - 0.5 * sample_range,
max_val + 0.5 * sample_range,
ind,
)
return ind
@staticmethod
def compute_kde(sdf, bw_method=None, ind=None):
# 'sdf' is a Spark DataFrame that selects one column.
# Using RDD is slow so we might have to change it to Dataset based implementation
# once Spark has that implementation.
sample = sdf.rdd.map(lambda x: float(x[0]))
kd = KernelDensity()
kd.setSample(sample)
assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number."
if bw_method is not None:
# Match the bandwidth with Spark.
kd.setBandwidth(float(bw_method))
return kd.estimate(list(map(float, ind)))
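# A minimal usage sketch (illustrative only; assumes an active Spark session
# and pandas-on-Spark imported as `ps`):
#
#   psser = ps.Series([1.0, 2.0, 2.5, 3.0, 5.0])
#   data = KdePlotBase.prepare_kde_data(psser)
#   sdf = data.to_spark()
#   ind = KdePlotBase.get_ind(sdf, None)
#   density = KdePlotBase.compute_kde(sdf, bw_method=0.3, ind=ind)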
class PandasOnSparkPlotAccessor(PandasObject):
"""
Series/Frames plotting accessor and method.
Uses the backend specified by the
option ``plotting.backend``. By default, plotly is used.
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()``
"""
pandas_plot_data_map = {
"pie": TopNPlotBase().get_top_n,
"bar": TopNPlotBase().get_top_n,
"barh": TopNPlotBase().get_top_n,
"scatter": TopNPlotBase().get_top_n,
"area": SampledPlotBase().get_sampled,
"line": SampledPlotBase().get_sampled,
}
_backends = {} # type: ignore
def __init__(self, data):
self.data = data
@staticmethod
def _find_backend(backend):
"""
Find a pandas-on-Spark plotting backend
"""
try:
return PandasOnSparkPlotAccessor._backends[backend]
except KeyError:
try:
module = importlib.import_module(backend)
except ImportError:
# We re-raise later on.
pass
else:
if hasattr(module, "plot") or hasattr(module, "plot_pandas_on_spark"):
# Validate that the interface is implemented when the option
# is set, rather than at plot time.
PandasOnSparkPlotAccessor._backends[backend] = module
return module
raise ValueError(
"Could not find plotting backend '{backend}'. Ensure that you've installed "
"the package providing the '{backend}' entrypoint, or that the package has a "
"top-level `.plot` method.".format(backend=backend)
)
@staticmethod
def _get_plot_backend(backend=None):
backend = backend or get_option("plotting.backend")
# Shortcut
if backend in PandasOnSparkPlotAccessor._backends:
return PandasOnSparkPlotAccessor._backends[backend]
if backend == "matplotlib":
# Because matplotlib is an optional dependency,
# we need to attempt an import here to raise an ImportError if needed.
try:
# test if matplotlib can be imported
import matplotlib # noqa: F401
from pyspark.pandas.plot import matplotlib as module
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
"default backend 'matplotlib' is selected."
) from None
PandasOnSparkPlotAccessor._backends["matplotlib"] = module
elif backend == "plotly":
try:
# test if plotly can be imported
import plotly # noqa: F401
from pyspark.pandas.plot import plotly as module
except ImportError:
raise ImportError(
"plotly is required for plotting when the "
"default backend 'plotly' is selected."
) from None
PandasOnSparkPlotAccessor._backends["plotly"] = module
else:
module = PandasOnSparkPlotAccessor._find_backend(backend)
PandasOnSparkPlotAccessor._backends[backend] = module
return module
def __call__(self, kind="line", backend=None, **kwargs):
plot_backend = PandasOnSparkPlotAccessor._get_plot_backend(backend)
plot_data = self.data
kind = {"density": "kde"}.get(kind, kind)
if hasattr(plot_backend, "plot_pandas_on_spark"):
# use if there's pandas-on-Spark specific method.
return plot_backend.plot_pandas_on_spark(plot_data, kind=kind, **kwargs)
else:
# fallback to use pandas'
if not PandasOnSparkPlotAccessor.pandas_plot_data_map[kind]:
raise NotImplementedError(
"'%s' plot is not supported with '%s' plot "
"backend yet." % (kind, plot_backend.__name__)
)
plot_data = PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](plot_data)
return plot_backend.plot(plot_data, kind=kind, **kwargs)
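    # Usage note (a sketch, assuming pandas-on-Spark is imported as `ps`):
    # the backend is resolved through the "plotting.backend" option, so
    #
    #   ps.set_option("plotting.backend", "matplotlib")
    #   psdf.plot(kind="hist")
    #
    # routes the call through the backend module looked up above.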
def line(self, x=None, y=None, **kwargs):
"""
Plot DataFrame/Series as lines.
This function is useful to plot lines using Series's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`Series.plot` or :meth:`DataFrame.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
See Also
--------
plotly.express.line : Plot y versus x as lines and/or markers (plotly).
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers (matplotlib).
Examples
--------
Basic plot.
For Series:
.. plotly::
>>> s = ps.Series([1, 3, 2])
>>> s.plot.line() # doctest: +SKIP
For DataFrame:
.. plotly::
The following example shows the populations for some animals
over the years.
>>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]},
... index=[1990, 1997, 2003, 2009, 2014])
>>> df.plot.line() # doctest: +SKIP
.. plotly::
The following example shows the relationship between both
populations.
>>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]},
... index=[1990, 1997, 2003, 2009, 2014])
>>> df.plot.line(x='pig', y='horse') # doctest: +SKIP
"""
return self(kind="line", x=x, y=y, **kwargs)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another.
If not specified, the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another.
If not specified, all numerical columns are used.
**kwds : optional
Additional keyword arguments are documented in
:meth:`pyspark.pandas.Series.plot` or
:meth:`pyspark.pandas.DataFrame.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Examples
--------
Basic plot.
For Series:
.. plotly::
>>> s = ps.Series([1, 3, 2])
>>> s.plot.bar() # doctest: +SKIP
For DataFrame:
.. plotly::
>>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> df.plot.bar(x='lab', y='val') # doctest: +SKIP
Plot a whole dataframe to a bar plot. Each column is stacked with a
distinct color along the horizontal axis.
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.bar() # doctest: +SKIP
Instead of stacking, the figure can be split by column with plotly
APIs.
.. plotly::
>>> from plotly.subplots import make_subplots
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> fig = (make_subplots(rows=2, cols=1)
            ...        .add_trace(df.plot.bar(y='speed').data[0], row=1, col=1)
... .add_trace(df.plot.bar(y='lifespan').data[0], row=2, col=1))
>>> fig # doctest: +SKIP
Plot a single column.
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.bar(y='speed') # doctest: +SKIP
Plot only selected categories for the DataFrame.
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.bar(x='lifespan') # doctest: +SKIP
"""
from pyspark.pandas import DataFrame, Series
if isinstance(self.data, Series):
return self(kind="bar", **kwds)
elif isinstance(self.data, DataFrame):
return self(kind="bar", x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to
:meth:`pyspark.pandas.DataFrame.plot` or :meth:`pyspark.pandas.Series.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
See Also
--------
plotly.express.bar : Plot a vertical bar plot using plotly.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
For Series:
.. plotly::
>>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> df.val.plot.barh() # doctest: +SKIP
For DataFrame:
.. plotly::
>>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> df.plot.barh(x='lab', y='val') # doctest: +SKIP
Plot a whole DataFrame to a horizontal bar plot
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.barh() # doctest: +SKIP
Plot a column of the DataFrame to a horizontal bar plot
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.barh(y='speed') # doctest: +SKIP
Plot DataFrame versus the desired column
.. plotly::
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = ps.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> df.plot.barh(x='lifespan') # doctest: +SKIP
"""
from pyspark.pandas import DataFrame, Series
if isinstance(self.data, Series):
return self(kind="barh", **kwargs)
elif isinstance(self.data, DataFrame):
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, **kwds):
"""
Make a box plot of the Series columns.
Parameters
----------
**kwds : optional
Additional keyword arguments are documented in
:meth:`pyspark.pandas.Series.plot`.
precision: scalar, default = 0.01
This argument is used by pandas-on-Spark to compute approximate statistics
for building a boxplot. Use *smaller* values to get more precise
statistics (matplotlib-only).
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Notes
-----
There are behavior differences between pandas-on-Spark and pandas.
* pandas-on-Spark computes approximate statistics - expect differences between
pandas and pandas-on-Spark boxplots, especially regarding 1st and 3rd quartiles.
* The `whis` argument is only supported as a single number.
* pandas-on-Spark doesn't support the following argument(s) (matplotlib-only).
* `bootstrap` argument is not supported
* `autorange` argument is not supported
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
For Series:
.. plotly::
>>> data = np.random.randn(25, 4)
>>> df = ps.DataFrame(data, columns=list('ABCD'))
>>> df['A'].plot.box() # doctest: +SKIP
This is an unsupported function for DataFrame type
"""
from pyspark.pandas import DataFrame, Series
if isinstance(self.data, Series):
return self(kind="box", **kwds)
elif isinstance(self.data, DataFrame):
return unsupported_function(class_name="pd.DataFrame", method_name="box")()
def hist(self, bins=10, **kwds):
"""
Draw one histogram of the DataFrame’s columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`plotting.backend.plot`,
on each series in the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
plotting backend.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Examples
--------
Basic plot.
For Series:
.. plotly::
>>> s = ps.Series([1, 3, 2])
>>> s.plot.hist() # doctest: +SKIP
For DataFrame:
.. plotly::
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns=['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> df = ps.from_pandas(df)
>>> df.plot.hist(bins=12, alpha=0.5) # doctest: +SKIP
"""
return self(kind="hist", bins=bins, **kwds)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
Parameters
----------
bw_method : scalar
The method used to calculate the estimator bandwidth.
See KernelDensity in PySpark for more information.
ind : NumPy array or integer, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs : optional
Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Examples
--------
A scalar bandwidth should be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plotly::
>>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> s.plot.kde(bw_method=0.3) # doctest: +SKIP
.. plotly::
>>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> s.plot.kde(bw_method=3) # doctest: +SKIP
The `ind` parameter determines the evaluation points for the
        plot of the estimated KDE:
.. plotly::
>>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3) # doctest: +SKIP
For DataFrame, it works in the same way as Series:
.. plotly::
>>> df = ps.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> df.plot.kde(bw_method=0.3) # doctest: +SKIP
.. plotly::
>>> df = ps.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> df.plot.kde(bw_method=3) # doctest: +SKIP
.. plotly::
>>> df = ps.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3) # doctest: +SKIP
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the plotly area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
            Area plots are stacked by default. Set to False to create an
unstacked plot (matplotlib-only).
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Examples
--------
For Series
.. plotly::
>>> df = ps.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> df.sales.plot.area() # doctest: +SKIP
For DataFrame
.. plotly::
>>> df = ps.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> df.plot.area() # doctest: +SKIP
"""
from pyspark.pandas import DataFrame, Series
if isinstance(self.data, Series):
return self(kind="area", **kwds)
elif isinstance(self.data, DataFrame):
return self(kind="area", x=x, y=y, **kwds)
def pie(self, **kwds):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`plotly.express.pie` for the
specified column.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed (matplotlib-only).
**kwds
Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
Examples
--------
For Series:
.. plotly::
>>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> df.mass.plot.pie() # doctest: +SKIP
For DataFrame:
.. plotly::
>>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> df.plot.pie(y='mass') # doctest: +SKIP
"""
from pyspark.pandas import DataFrame, Series
if isinstance(self.data, Series):
return self(kind="pie", **kwds)
else:
            # pandas will raise an error if y is None and subplots is not True
if (
isinstance(self.data, DataFrame)
and kwds.get("y", None) is None
and not kwds.get("subplots", False)
):
raise ValueError(
"pie requires either y column or 'subplots=True' (matplotlib-only)"
)
return self(kind="pie", **kwds)
def scatter(self, x, y, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
(matplotlib-only).
c : str, int or array_like, optional
(matplotlib-only).
**kwds: Optional
Keyword arguments to pass on to :meth:`pyspark.pandas.DataFrame.plot`.
Returns
-------
:class:`plotly.graph_objs.Figure`
            Return a custom object when ``backend!=plotly``.
Return an ndarray when ``subplots=True`` (matplotlib-only).
See Also
--------
plotly.express.scatter : Scatter plot using multiple input data
formats (plotly).
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats (matplotlib).
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plotly::
>>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> df.plot.scatter(x='length', y='width') # doctest: +SKIP
And now with dark scheme:
.. plotly::
>>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> fig = df.plot.scatter(x='length', y='width')
>>> fig.update_layout(template="plotly_dark") # doctest: +SKIP
"""
return self(kind="scatter", x=x, y=y, **kwds)
def hexbin(self, **kwds):
return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
| apache-2.0 |
nre-aachen/GeMpy | gempy/GemPy_f.py | 1 | 13575 | """
Module with classes and methods to perform implicit regional modelling based on
the potential field method.
Tested on Ubuntu 14
Created on 10/10/2016
@author: Miguel de la Varga
"""
from __future__ import division
import os
from os import path
import sys
# This is for sphinx to find the packages
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
# --DEP
# import theano
# import theano.tensor as T
import numpy as _np
# --DEP-- import pandas as _pn
import warnings
import copy
from gempy.Visualization import PlotData
try:
from gempy.visualization_vtk import visualize, export_vtk_rectilinear
except ModuleNotFoundError:
warnings.warn('Vtk package is not installed. No vtk visualization available.')
from gempy.DataManagement import InputData, InterpolatorInput
from IPython.core.debugger import Tracer
# DEP?
# def rescale_data(geo_data, rescaling_factor=None):
# """
# Rescale the data of a DataManagement object between 0 and 1 due to stability problem of the float32.
# Args:
# geo_data: DataManagement object with the real scale data
# rescaling_factor(float): factor of the rescaling. Default to maximum distance in one the axis
#
# Returns:
#
# """
# max_coord = _pn.concat(
# [geo_data.foliations, geo_data.interfaces]).max()[['X', 'Y', 'Z']]
# min_coord = _pn.concat(
# [geo_data.foliations, geo_data.interfaces]).min()[['X', 'Y', 'Z']]
#
# if not rescaling_factor:
# rescaling_factor = 2*_np.max(max_coord - min_coord)
#
# centers = (max_coord+min_coord)/2
#
# new_coord_interfaces = (geo_data.interfaces[['X', 'Y', 'Z']] -
# centers) / rescaling_factor + 0.5001
#
# new_coord_foliations = (geo_data.foliations[['X', 'Y', 'Z']] -
# centers) / rescaling_factor + 0.5001
#
# new_coord_extent = (geo_data.extent - _np.repeat(centers, 2)) / rescaling_factor + 0.5001
#
# geo_data_rescaled = copy.deepcopy(geo_data)
# geo_data_rescaled.interfaces[['X', 'Y', 'Z']] = new_coord_interfaces
# geo_data_rescaled.foliations[['X', 'Y', 'Z']] = new_coord_foliations
# geo_data_rescaled.extent = new_coord_extent.as_matrix()
#
# geo_data_rescaled.grid.grid = (geo_data.grid.grid - centers.as_matrix()) /rescaling_factor + 0.5001
#
# geo_data_rescaled.rescaling_factor = rescaling_factor
#
# return geo_data_rescaled
# TODO needs to be updated
# def compute_block_model(geo_data, series_number="all",
# series_distribution=None, order_series=None,
# extent=None, resolution=None, grid_type="regular_3D",
# verbose=0, **kwargs):
#
# if extent or resolution:
# set_grid(geo_data, extent=extent, resolution=resolution, grid_type=grid_type, **kwargs)
#
# if series_distribution:
# set_data_series(geo_data, series_distribution=series_distribution, order_series=order_series, verbose=0)
#
# if not getattr(geo_data, 'interpolator', None):
# import warnings
#
# warnings.warn('Using default interpolation values')
# set_interpolator(geo_data)
#
# geo_data.interpolator.tg.final_block.set_value(_np.zeros_like(geo_data.grid.grid[:, 0]))
#
# geo_data.interpolator.compute_block_model(series_number=series_number, verbose=verbose)
#
# return geo_data.interpolator.tg.final_block
def data_to_pickle(geo_data, path=False):
geo_data.data_to_pickle(path)
def read_pickle(path):
import pickle
with open(path, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
data = pickle.load(f)
return data
def get_series(geo_data):
    """
    Args:
        geo_data: DataManagement object with the raw data
    Returns:
        The series defined on the geo_data object
    """
    return geo_data.series
def get_grid(geo_data):
return geo_data.grid.grid
def get_resolution(geo_data):
return geo_data.resolution
def get_extent(geo_data):
return geo_data.extent
def get_raw_data(geo_data, dtype='all'):
return geo_data.get_raw_data(itype=dtype)
def create_data(extent, resolution=[50, 50, 50], **kwargs):
"""
    Method to initialize the data class. When calling this function, some of the data has to be provided (TODO: give
    everything a default).
Args:
extent (list or array): [x_min, x_max, y_min, y_max, z_min, z_max]. Extent for the visualization of data
        and default for the grid class.
resolution (list or array): [nx, ny, nz]. Resolution for the visualization of data
        and default for the grid class.
**kwargs: Arbitrary keyword arguments.
Keyword Args:
        resolution (Optional[list]): [nx, ny, nz]. Defaults to 50
path_i: Path to the data bases of interfaces. Default os.getcwd(),
path_f: Path to the data bases of foliations. Default os.getcwd()
Returns:
GeMpy.DataManagement: Object that encapsulate all raw data of the project
dep: self.Plot(GeMpy_core.PlotData): Object to visualize data and results
"""
return InputData(extent, resolution, **kwargs)
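# Illustrative sketch (the csv paths are hypothetical): set up a project over a
# 2 km cube at the default 50 x 50 x 50 resolution.
#
#   geo_data = create_data([0, 2000, 0, 2000, 0, 2000],
#                          resolution=[50, 50, 50],
#                          path_i="input/interfaces.csv",
#                          path_f="input/foliations.csv")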
def i_set_data(geo_data, dtype="foliations", action="Open"):
if action == 'Close':
geo_data.i_close_set_data()
if action == 'Open':
geo_data.i_open_set_data(itype=dtype)
def select_series(geo_data, series):
"""
    Return a new geo_data object containing only the data of the given series
    :param series: list of int (order of the series) or list of str (series names)
    :return: deep copy of geo_data restricted to the selected series
"""
new_geo_data = copy.deepcopy(geo_data)
if type(series) == int or type(series[0]) == int:
new_geo_data.interfaces = geo_data.interfaces[geo_data.interfaces['order_series'].isin(series)]
new_geo_data.foliations = geo_data.foliations[geo_data.foliations['order_series'].isin(series)]
elif type(series[0]) == str:
new_geo_data.interfaces = geo_data.interfaces[geo_data.interfaces['series'].isin(series)]
new_geo_data.foliations = geo_data.foliations[geo_data.foliations['series'].isin(series)]
return new_geo_data
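# Illustrative sketch (series names are hypothetical): both integer (order) and
# string (name) selections return a reduced copy of the geo_data object.
#
#   young_series = select_series(geo_data, [0, 1])            # by order_series
#   fault_series = select_series(geo_data, ['fault_series'])  # by series name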
def set_data_series(geo_data, series_distribution=None, order_series=None,
update_p_field=True, verbose=0):
geo_data.set_series(series_distribution=series_distribution, order=order_series)
try:
if update_p_field:
geo_data.interpolator.compute_potential_fields()
except AttributeError:
pass
if verbose > 0:
return get_series(geo_data)
def set_interfaces(geo_data, interf_Dataframe, append=False, update_p_field=True):
geo_data.set_interfaces(interf_Dataframe, append=append)
# To update the interpolator parameters without calling a new object
try:
geo_data.interpolator._data = geo_data
geo_data.interpolator._grid = geo_data.grid
# geo_data.interpolator._set_constant_parameteres(geo_data, geo_data.interpolator._grid)
if update_p_field:
geo_data.interpolator.compute_potential_fields()
except AttributeError:
pass
def set_foliations(geo_data, foliat_Dataframe, append=False, update_p_field=True):
geo_data.set_foliations(foliat_Dataframe, append=append)
# To update the interpolator parameters without calling a new object
try:
geo_data.interpolator._data = geo_data
geo_data.interpolator._grid = geo_data.grid
# geo_data.interpolator._set_constant_parameteres(geo_data, geo_data.interpolator._grid)
if update_p_field:
geo_data.interpolator.compute_potential_fields()
except AttributeError:
pass
#DEP?
def set_grid(geo_data, new_grid=None, extent=None, resolution=None, grid_type="regular_3D", **kwargs):
"""
    Method to initialize the grid class. So far it is really simple and only has the regular grid type
Args:
grid_type (str): regular_3D or regular_2D (I am not even sure if regular 2D still working)
**kwargs: Arbitrary keyword arguments.
Returns:
self.new_grid(GeMpy_core.new_grid): Object that contain different grids
"""
if new_grid is not None:
        assert new_grid.shape[1] == 3 and len(new_grid.shape) == 2, \
            'The shape of new_grid must be (n, 3) where n is the number of points of the grid'
geo_data.grid.grid = new_grid
else:
if not extent:
extent = geo_data.extent
if not resolution:
resolution = geo_data.resolution
geo_data.grid = geo_data.GridClass(extent, resolution, grid_type=grid_type, **kwargs)
#DEP?
# def set_interpolator(geo_data, *args, **kwargs):
# """
# Method to initialize the class interpolator. All the constant parameters for the interpolation can be passed
# as args, otherwise they will take the default value (TODO: documentation of the dafault values)
#
# Args:
# *args: Variable length argument list
# **kwargs: Arbitrary keyword arguments.
#
# Keyword Args:
# range_var: Range of the variogram. Default None
# c_o: Covariance at 0. Default None
# nugget_effect: Nugget effect of the gradients. Default 0.01
# u_grade: Grade of the polynomial used in the universal part of the Kriging. Default 2
# rescaling_factor: Magic factor that multiplies the covariances). Default 2
#
# Returns:
# self.Interpolator (GeMpy_core.Interpolator): Object to perform the potential field method
# self.Plot(GeMpy_core.PlotData): Object to visualize data and results. It gets updated.
# """
#
# rescaling_factor = kwargs.get('rescaling_factor', None)
#
# if 'u_grade' in kwargs:
# compile_theano = True
#
# if not getattr(geo_data, 'grid', None):
# set_grid(geo_data)
#
# geo_data_int = rescale_data(geo_data, rescaling_factor=rescaling_factor)
#
# if not getattr(geo_data_int, 'interpolator', None) or compile_theano:
# print('I am in the setting')
# geo_data_int.interpolator = geo_data_int.InterpolatorClass(geo_data_int, geo_data_int.grid,
# *args, **kwargs)
# else:
# geo_data_int.interpolator._data = geo_data_int
# geo_data_int.interpolator._grid = geo_data_int.grid
# geo_data_int.interpolator.set_theano_shared_parameteres(geo_data_int, geo_data_int.interpolator._grid, **kwargs)
#
# return geo_data_int
def plot_data(geo_data, direction="y", series="all", **kwargs):
plot = PlotData(geo_data)
plot.plot_data(direction=direction, series=series, **kwargs)
# TODO saving options
return plot
def plot_section(geo_data, block, cell_number, direction="y", **kwargs):
plot = PlotData(geo_data)
plot.plot_block_section(cell_number, block=block, direction=direction, **kwargs)
# TODO saving options
return plot
def plot_potential_field(geo_data, potential_field, cell_number, n_pf=0,
direction="y", plot_data=True, series="all", *args, **kwargs):
plot = PlotData(geo_data)
plot.plot_potential_field(potential_field, cell_number, n_pf=n_pf,
direction=direction, plot_data=plot_data, series=series,
*args, **kwargs)
def plot_data_3D(geo_data):
r, i = visualize(geo_data)
del r, i
return None
# DEP
# def compute_potential_fields(geo_data, verbose=0):
# geo_data.interpolator.compute_potential_fields(verbose=verbose)
def set_interpolation_data(geo_data, **kwargs):
in_data = InterpolatorInput(geo_data, **kwargs)
return in_data
# =====================================
# Functions for the InterpolatorData
# =====================================
# TODO check that is a interp_data object and if not try to create within the function one from the geo_data
def get_kriging_parameters(interp_data, verbose=0):
return interp_data.interpolator.get_kriging_parameters(verbose=verbose)
def get_th_fn(interp_data, dtype=None, u_grade=None, **kwargs):
"""
Args:
        interp_data:
**kwargs:
Returns:
"""
# DEP?
# Choosing float precision for the computation
# if not dtype:
# if theano.config.device == 'gpu':
# dtype = 'float32'
# else:
# print('making float 64')
# dtype = 'float64'
#
# # We make a rescaled version of geo_data for stability reasons
# data_interp = set_interpolator(geo_data, dtype=dtype)
#
# # This are the shared parameters and the compilation of the function. This will be hidden as well at some point
# input_data_T = data_interp.interpolator.tg.input_parameters_list()
#
# # This prepares the user data to the theano function
# #input_data_P = data_interp.interpolator.data_prep(u_grade=u_grade)
#
# # then we compile we have to pass the number of formations that are faults!!
# th_fn = theano.function(input_data_T, data_interp.interpolator.tg.whole_block_model(data_interp.n_faults),
# on_unused_input='ignore',
# allow_input_downcast=True,
# profile=False)
return interp_data.compile_th_fn(dtype=dtype, **kwargs)
def compute_model(interp_data, u_grade=None):
    if getattr(interp_data, 'th_fn', None) is None:
interp_data.compile_th_fn()
i = interp_data.get_input_data(u_grade=u_grade)
sol = interp_data.th_fn(*i)
return _np.squeeze(sol)
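# Illustrative sketch of the typical call chain (input paths are hypothetical):
#
#   geo_data = create_data([0, 2000, 0, 2000, 0, 2000], resolution=[50, 50, 50],
#                          path_i="interfaces.csv", path_f="foliations.csv")
#   interp_data = set_interpolation_data(geo_data)
#   block = compute_model(interp_data)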
| mit |
mikebenfield/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 86 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
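# Sanity check (illustrative): a correctly classified point with margin z >= 1
# incurs zero modified Huber loss, e.g.
#   modified_huber_loss(np.array([1.0]), np.array([2.0]))  # -> array([0.])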
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
nokute78/fluent-bit | plugins/out_kafka/librdkafka-1.6.0/tests/performance_plot.py | 3 | 2902 | #!/usr/bin/env python3
#
import sys, json
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
def semver2int (semver):
if semver == 'trunk':
semver = '0.10.0.0'
vi = 0
i = 0
for v in reversed(semver.split('.')):
        vi += int(v) * (10 ** i)  # positional weight so every component counts
i += 1
return vi
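# Illustrative ordering with the weighting above, e.g.
#   semver2int('0.9.0.1') < semver2int('0.10.0.0')
# so newer broker versions sort after older ones.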
def get_perf_data (perfname, stats):
""" Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays
labels: broker versions
x: list with identical value (to plot on same x point)
y: perfname counter (average)
errs: errors
"""
ver = defaultdict(list)
# Per version:
# * accumulate values
# * calculate average
# * calculate error
# Accumulate values per version
for x in stats:
v = str(x[0])
ver[v].append(x[1][perfname])
print('%s is %s' % (perfname, ver))
labels0 = sorted(ver.keys(), key=semver2int)
y0 = list()
errs0 = list()
# Maintain order by using labels0
for v in labels0:
# Calculate average
avg = sum(ver[v]) / float(len(ver[v]))
y0.append(avg)
# Calculate error
errs0.append(max(ver[v]) - avg)
labels = np.array(labels0)
y1 = np.array(y0)
x1 = np.array(range(0, len(labels)))
errs = np.array(errs0)
return [labels,x1,y1,errs]
def plot (description, name, stats, perfname, outfile=None):
labels,x,y,errs = get_perf_data(perfname, stats)
colors = np.random.rand(len(labels))
plt.title('%s: %s %s' % (description, name, perfname))
plt.xlabel('Kafka version')
plt.ylabel(perfname)
plt.errorbar(x, y, yerr=errs, alpha=0.5)
plt.xticks(x, labels, rotation='vertical')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.2)
if outfile is None:
plt.show()
else:
plt.savefig(outfile, bbox_inches='tight')
return
if __name__ == '__main__':
outfile = sys.argv[1]
reports = []
for rf in sys.argv[2:]:
with open(rf) as f:
reports.append(json.load(f))
stats = defaultdict(list)
# Extract performance test data
for rep in reports:
        perfs = rep.get('tests', dict()).get('0038_performance', dict()).get('report', None)
if perfs is None:
continue
for perf in perfs:
for n in ['producer','consumer']:
o = perf.get(n, None)
if o is None:
print('no %s in %s' % (n, perf))
continue
stats[n].append((rep.get('broker_version', 'unknown'), o))
for t in ['producer','consumer']:
for perfname in ['mb_per_sec', 'records_per_sec']:
plot('librdkafka 0038_performance test: %s (%d samples)' % \
(outfile, len(reports)),
t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname))
| apache-2.0 |
yanchen036/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 7 | 46402 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
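# Minimal usage sketch (illustrative only; builds graph ops on small constants).
def _select_example():
  """Sketch of `select`: scalar vs. list selection on a labeled axis."""
  image = core.LabeledTensor(
      array_ops.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
      [('row', 2), ('channel', ['red', 'green', 'blue'])])
  # A scalar selection drops the axis ...
  red = select(image, {'channel': 'red'})  # axes: ['row']
  # ... while a list selection keeps it, relabeled with the chosen labels.
  red_green = select(image, {'channel': ['red', 'green']})  # keeps 'channel'
  return red, red_green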
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
ValueError: If fewer than one tensor inputs is provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
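# Minimal usage sketch (illustrative only): labels along the concatenated axis
# are themselves concatenated.
def _concat_example():
  """Sketch of `concat` along a labeled axis."""
  a = core.LabeledTensor(array_ops.constant([1.0, 2.0]), [('x', ['a', 'b'])])
  b = core.LabeledTensor(array_ops.constant([3.0]), [('x', ['c'])])
  return concat([a, b], 'x')  # axis 'x' has labels ['a', 'b', 'c']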
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
ValueError: If fewer than one input tensors is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
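# Minimal usage sketch (illustrative only): pack and unpack are inverses.
def _pack_unpack_example():
  """Sketch of `pack` and `unpack` with a labeled 'batch' axis."""
  x = core.LabeledTensor(array_ops.constant([1.0, 2.0]), [('feature', 2)])
  y = core.LabeledTensor(array_ops.constant([3.0, 4.0]), [('feature', 2)])
  packed = pack([x, y], ('batch', ['x', 'y']))  # axes: ['batch', 'feature']
  return unpack(packed, 'batch')  # a list of two LabeledTensors again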
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some
      # frequency because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
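# Minimal usage sketch (illustrative only): collapse two axes into one and back.
def _reshape_example():
  """Sketch of `reshape` with one axis of unknown size."""
  grid = core.LabeledTensor(
      array_ops.constant([[1, 2], [3, 4], [5, 6]]), [('row', 3), ('col', 2)])
  flat = reshape(grid, ['row', 'col'], ['pixel'])  # 'pixel' size inferred as 6
  return reshape(flat, ['pixel'], [('row', 3), ('col', 2)])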
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
the current axis_order_scope, if set, or in or order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
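# Minimal usage sketch (illustrative only): a matrix-vector product summed over
# the single shared axis.
def _matmul_example():
  """Sketch of `matmul` between a rank-2 and a rank-1 LabeledTensor."""
  matrix = core.LabeledTensor(
      array_ops.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
      [('row', 3), ('col', 2)])
  vector = core.LabeledTensor(array_ops.constant([1.0, -1.0]), [('col', 2)])
  return matmul(matrix, vector)  # 'col' is summed out; result axes: ['row']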
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keepdims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
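# Minimal usage sketch (illustrative only) of the generated reductions: a bare
# axis name squeezes the reduced axis, while a (name, None) pair keeps it.
def _reduce_example():
  """Sketch of `reduce_sum` with squeezed and kept axes."""
  grid = core.LabeledTensor(
      array_ops.constant([[1.0, 2.0], [3.0, 4.0]]), [('x', 2), ('y', 2)])
  total = reduce_sum(grid)  # all axes reduced; a scalar LabeledTensor
  per_x = reduce_sum(grid, 'y')  # 'y' summed out and squeezed; axes: ['x']
  kept = reduce_sum(grid, [('y', None)])  # 'y' kept as a size-1 dimension
  return total, per_x, kept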
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
different than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
The constant labeled tensor.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
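# Illustrative usage sketch (not from the upstream module; assumes the `lt`
# alias as above):
#
#   x = lt.constant([[1, 2, 3], [4, 5, 6]], axes=['row', 'col'])
#   # the axes are plain strings, so the [2, 3] shape is inferred from `value`.
#
# Passing fully specified Axis objects instead fixes the shape explicitly and
# lets tf.constant fill a scalar `value` to that shape.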
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because the True indices in `mask` may not be known statically.
Args:
labeled_tensor: The input tensor.
mask: A 1D boolean LabeledTensor whose single axis must equal the first axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
ValueError: if the first axis of the mask does not equal the first axis of `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
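# Illustrative usage sketch (not from the upstream module; assumes the `tf`
# and `lt` aliases as above):
#
#   x = lt.LabeledTensor(tf.constant([[1, 2], [3, 4], [5, 6]]),
#                        [('row', ['a', 'b', 'c']), 'col'])
#   mask = lt.LabeledTensor(tf.constant([True, False, True]),
#                           [('row', ['a', 'b', 'c'])])
#   y = lt.boolean_mask(x, mask)
#   # y keeps the axis names ['row', 'col'], but 'row' loses its tick labels
#   # because the number of True entries is only known when the graph runs.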
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
ValueError: if `condition`, `x`, and `y` do not all have the same axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
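# Illustrative usage sketch (not from the upstream module; assumes the `tf`
# and `lt` aliases as above):
#
#   cond = lt.LabeledTensor(tf.constant([True, False, True]), ['x'])
#   a = lt.LabeledTensor(tf.constant([1, 2, 3]), ['x'])
#   b = lt.LabeledTensor(tf.constant([10, 20, 30]), ['x'])
#   y = lt.where(cond, a, b)
#   # y evaluates to [1, 20, 3] and keeps the axes of `a`.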
| apache-2.0 |
jreback/pandas | pandas/tests/series/apply/test_series_apply.py | 1 | 29906 | from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna, timedelta_range
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
def test_series_map_box_timedelta(self):
# GH#11349
ser = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
def f(x):
return x.total_seconds()
ser.map(f)
ser.apply(f)
DataFrame(ser).applymap(f)
def test_apply(self, datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(
datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
)
# element-wise apply
import math
tm.assert_series_equal(
datetime_series.apply(math.exp), np.exp(datetime_series)
)
# empty series
s = Series(dtype=object, name="foo", index=Index([], name="bar"))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3], dtype=np.float64)
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
def f(x):
return (x, x + 1)
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
def f(x):
return x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(["foo,bar"])
result = s.apply(str.split, args=(",",))
assert result[0] == ["foo", "bar"]
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = DataFrame(
np.random.randn(10, 3),
columns=["A", "B", "C"],
index=pd.date_range("1/1/2000", periods=10),
)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
tsdf.A.agg({"foo": ["sum", "mean"]})
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
# GH 20714 bug fixed in: GH 24275
s = Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
expected = Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index(self):
# GH 21245
s = Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
class TestSeriesAggregate:
def test_transform(self, string_series):
# transforming functions
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.apply(np.sqrt)
expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
# list-like
result = string_series.apply([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
result = string_series.apply(["sqrt"])
tm.assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
result = string_series.apply([np.sqrt, np.abs])
tm.assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
# we are trying to transform with an aggregator
msg = "cannot combine transform and aggregation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg(["sqrt", "max"])
msg = "cannot perform both aggregation and transformation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg({"foo": np.sqrt, "bar": "sum"})
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype="int64", name="series")
result = s.agg(["min", "max"])
expected = Series([0, 5], index=["min", "max"], name="series")
tm.assert_series_equal(result, expected)
result = s.agg({"foo": "min"})
expected = Series([0], index=["foo"], name="series")
tm.assert_series_equal(result, expected)
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"]})
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype="int64", name="series")
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(
lambda x: Series([x, x ** 2], index=["x", "x^2"])
)
expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(
{
"count": "count",
"mean": "mean",
"std": "std",
"min": "min",
"25%": lambda x: x.quantile(0.25),
"50%": "median",
"75%": lambda x: x.quantile(0.75),
"max": "max",
}
)
tm.assert_series_equal(result, expected)
def test_reduce(self, string_series):
# reductions with named functions
result = string_series.agg(["sum", "mean"])
expected = Series(
[string_series.sum(), string_series.mean()],
["sum", "mean"],
name=string_series.name,
)
tm.assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg("size")
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(["size", "count", "mean"])
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
tm.assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", "c"), # see GH12863
("any", "a"),
],
),
),
)
def test_agg_cython_table(self, series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform(self, series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series("a b c".split()),
[
("mean", TypeError), # mean raises TypeError
("prod", TypeError),
("std", TypeError),
("var", TypeError),
("median", TypeError),
("cumprod", TypeError),
],
)
),
)
def test_agg_cython_table_raises(self, series, func, expected):
# GH21224
msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
with pytest.raises(expected, match=msg):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
def test_series_apply_no_suffix_index(self):
# GH36189
s = Series([4] * 3)
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
tm.assert_series_equal(result, expected)
class TestSeriesMap:
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(self, index):
if isinstance(index, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(index)
result = s.map({})
expected = Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int(self):
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference(self):
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter(self):
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key(self):
# https://github.com/pandas-dev/pandas/issues/17648
# Checks that np.nan key is appropriately mapped
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: "three"})
result = s.map(dictionary)
expected = Series(["missing", "missing", "three"])
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
s = Series([1, 2, 3])
not_a_dictionary = non_dict_mapping_subclass({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
class NonDictMappingWithMissing(non_dict_mapping_subclass):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
not_a_dictionary = NonDictMappingWithMissing({3: "three"})
result = s.map(not_a_dictionary)
# __missing__ is a dict concept, not a Mapping concept,
# so it should not change the result!
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(
list("abbabcd"), categories=list("dcba"), ordered=True
)
exp = Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
s.map(lambda x: x, na_action="ignore")
def test_map_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = Series(values, name="XX")
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
s.map(lambda x: x, na_action="ignore")
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"vals,mapping,exp",
[
(list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
(list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3),
],
)
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, Series(exp))
@pytest.mark.parametrize(
"dti,exp",
[
(
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
),
(
tm.makeTimeSeries(nper=30),
DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
),
],
)
@pytest.mark.parametrize("aware", [True, False])
def test_apply_series_on_date_time_index_aware_series(self, dti, exp, aware):
# GH 25959
# Calling apply on a localized time series should not cause an error
if aware:
index = dti.tz_localize("UTC").index
else:
index = dti.index
result = Series(index).apply(lambda x: Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
# GH 25959
# Calling apply on a localized time series should not cause an error
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
result = Series(series.index).apply(lambda x: 1)
tm.assert_series_equal(result, Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
ser = Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
# https://github.com/pandas-dev/pandas/issues/32815
s = Series([1, 2, 3])
msg = "na_action must either be 'ignore' or None"
with pytest.raises(ValueError, match=msg):
s.map(lambda x: x, na_action="____")
def test_apply_to_timedelta(self):
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
| bsd-3-clause |
huzq/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 44 | 2463 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_blobs
# we create two clusters of random points
n_samples_1 = 1000
n_samples_2 = 100
centers = [[0.0, 0.0], [2.0, 2.0]]
clusters_std = [1.5, 0.5]
X, y = make_blobs(n_samples=[n_samples_1, n_samples_2],
centers=centers,
cluster_std=clusters_std,
random_state=0, shuffle=False)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
# fit the model and get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
# plot the samples
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
# plot the decision functions for both classifiers
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
# get the separating hyperplane
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
a = ax.contour(XX, YY, Z, colors='k', levels=[0], alpha=0.5, linestyles=['-'])
# get the separating hyperplane for weighted classes
Z = wclf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins for weighted classes
b = ax.contour(XX, YY, Z, colors='r', levels=[0], alpha=0.5, linestyles=['-'])
plt.legend([a.collections[0], b.collections[0]], ["non weighted", "weighted"],
loc="upper right")
plt.show()
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved using Martinsson's randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest Wikipedia-related components of the
# principal singular vector, which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
MarineLasbleis/GrowYourIC | notebooks/WD11.py | 1 | 2954 | # import statements
import numpy as np
import matplotlib.pyplot as plt #for figures
from mpl_toolkits.basemap import Basemap #to render maps
import math
import json #to write dict with parameters
from GrowYourIC import positions, geodyn, geodyn_trg, geodyn_static, plot_data, data
plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('viridis')
cm2 = plt.cm.get_cmap('winter')
## real data set
data_set = data.SeismicFromFile("~/ownCloud/Research/Projets/CIDER_IC/GrowYourIC/GrowYourIC/data/WD11.dat")
residual = data_set.real_residual()
velocity_center = [0., -80]#center of the eastern hemisphere
r, t, p = data_set.extract_rtp("bottom_turning_point")
dist = positions.angular_distance_to_point(t, p, *velocity_center)
fig, ax = plt.subplots(2)
ax[0].hist(1221*(1-r))
zeta = data_set.extract_zeta()
ax[1].hist(zeta)
fig, ax = plt.subplots(sharey=True, figsize=(8, 2))
cm2 = plt.cm.get_cmap('winter')
sc1 = ax.scatter(p, residual, c=zeta, s=10,cmap=cm2, linewidth=0)
ax.set_xlabel("longitude")
ax.set_ylabel("residuals")
ax.set_xlim([-180, 180])
#sc2 = ax[1].scatter(dist, residual, c="k", s=10,cmap=cm2, linewidth=0)
#ax[1].set_xlabel("angular distance to ({}, {})".format(*velocity_center))
#ax[1].set_xlim([0, 180])
#fig.suptitle("Dataset: {},\n geodynamic model: {}".format(data_set_random.name, geodynModel.name))
cbar2 = fig.colorbar(sc1)
cbar2.set_label("zeta")
fig, ax = plt.subplots(figsize=(8, 2))
rICB_dim = 1221. #in km
sc=ax.scatter(p,rICB_dim*(1.-r), c=residual, s=10,cmap=cm, linewidth=0)
ax.set_ylim(-0,120)
fig.gca().invert_yaxis()
ax.set_xlim(-180,180)
cbar = fig.colorbar(sc)
cbar.set_label("Residual")
ax.set_xlabel("longitude")
ax.set_ylabel("depth (km)")
ax.plot([11,11],[10,30], 'k')
ax.plot([21,21],[30,58], 'k')
ax.plot([38,38],[58,110], 'k')
ax.plot([-80,100], [30,30], 'k:')
ax.plot([-80,100], [58,58], 'k:')
points = [13, 234, 456, 1234, 2343, 27, 56, 567, 789]
for point_value in points:
point = data_set[point_value]
print(point)
point.straight_in_out(30)
traj_r = np.zeros(30)
traj_p = np.zeros(30)
for i, po in enumerate(point.points):
r, t, p = po.r, po.theta, po.phi-180.
traj_r[i] =rICB_dim*(1.-r)
traj_p[i] = p
ax.plot(traj_p, traj_r, 'k')
plt.savefig("test.pdf")
print(r.shape, residual.shape)
fig, ax = plt.subplots(1, 4, sharey=True, sharex=True)
sc = ax[0].scatter(residual, zeta, c=dist , cmap="seismic", linewidth=0, s=10)
cbar = fig.colorbar(sc)
masks = [np.squeeze(rICB_dim*(1.-r))<30, np.squeeze(rICB_dim*(1.-r))>58, (np.squeeze(rICB_dim*(1.-r))>30)*(np.squeeze(rICB_dim*(1.-r))<58)]
#mask = np.squeeze(rICB_dim*(1.-r))<30
#print(mask.shape, zeta.shape)
zeta = np.squeeze(zeta)
dist = np.squeeze(dist)
for i, mask in enumerate(masks):
ax[i+1].scatter(np.ma.masked_where(mask, (residual)), np.ma.masked_where(mask, zeta), c= np.ma.masked_where(mask, dist), s=10, cmap="seismic", linewidth=0)
plt.show() | mit |
chrsrds/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 21 | 2637 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
def test_mmhash3_int():
assert murmurhash3_32(3) == 847579505
assert murmurhash3_32(3, seed=0) == 847579505
assert murmurhash3_32(3, seed=42) == -1823081949
assert murmurhash3_32(3, positive=False) == 847579505
assert murmurhash3_32(3, seed=0, positive=False) == 847579505
assert murmurhash3_32(3, seed=42, positive=False) == -1823081949
assert murmurhash3_32(3, positive=True) == 847579505
assert murmurhash3_32(3, seed=0, positive=True) == 847579505
assert murmurhash3_32(3, seed=42, positive=True) == 2471885347
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert murmurhash3_32(b'foo', 0) == -156908512
assert murmurhash3_32(b'foo', 42) == -1322301282
assert murmurhash3_32(b'foo', 0, positive=True) == 4138058784
assert murmurhash3_32(b'foo', 42, positive=True) == 2972666014
def test_mmhash3_unicode():
assert murmurhash3_32('foo', 0) == -156908512
assert murmurhash3_32('foo', 42) == -1322301282
assert murmurhash3_32('foo', 0, positive=True) == 4138058784
assert murmurhash3_32('foo', 42, positive=True) == 2972666014
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert h not in previous_hashes, \
"Found collision on growing empty string"
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.full(n_bins, 1. / n_bins)
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
userbz/DeMix | deprecated/Version_0/post1_psmMzmlExtend.py | 1 | 1529 | # Bo.Zhang@ki.se
# extending PSM list from second-pass Morpheus search for rescoring
import sys
import pymzml
import numpy
import pandas
def nearest(target, arr):
try:
return arr[numpy.abs(arr - target).argmin()]
except:
return 0
def peak_pair(target, arr):
match = [nearest(p, arr) for p in target]
return repr(match)
def pymzml_psm(fn, df):
speciter = pymzml.run.Reader(fn)
index_set = set(df.index)
df['Nearest Matches'] = ['' for _ in df.index]
try:
for spec in speciter:
idx = int(spec.xmlTree.next().get('index')) + 1
if idx and idx % 2000 == 0:
sys.stderr.write("%d %s\n" % (idx, fn))
if spec['ms level'] != 2 or idx not in index_set:
continue
theoSpec = df.loc[idx]['Theoretical Products']
specPeaks = numpy.array(map(lambda p: p[0], spec.peaks))
match = peak_pair(theoSpec, specPeaks)
df.loc[idx, 'Nearest Matches'] = match
except KeyError:
pass
return df
if __name__ == '__main__':
df = pandas.read_table(sys.argv[1], index_col= 1)
if 'Theoretical Products' in df.columns:
df['Theoretical Products'] = [eval(i) for i in df['Theoretical Products']]
else:
import psmTheoretical
df = psmTheoretical.add_theoretical(sys.argv[1])
df.to_csv(sys.argv[1]+ '.ext', index=0, sep='\t')
df = pymzml_psm(sys.argv[2], df)
df.to_csv(sys.argv[1]+ '.ext.matched', index=0, sep='\t')
| mit |
linucks/ample | ample/util/benchmark_util.py | 1 | 21169 | """
Created on 24 Oct 2014
@author: jmht
"""
import copy
import glob
import logging
import os
import pandas as pd
import shutil
import sys
from ample.util import ample_util, csymmatch, mtz_util, pdb_edit, pdb_model, reforigin, residue_map, rio, shelxe, tm_util
logger = logging.getLogger(__name__)
_oldroot = None
_newroot = None
SHELXE_STEM = 'shelxe'
_CSV_KEYLIST = [
'ample_version',
# Native info
'native_pdb_code',
'native_pdb_title',
'native_pdb_resolution',
'native_pdb_solvent_content',
'native_pdb_space_group',
'native_pdb_num_atoms',
'native_pdb_num_residues',
'native_pdb_num_chains',
# The modelled sequence
'fasta_length',
# Get the ensemble data and add to the MRBUMP data
'ensemble_name',
'ensemble_percent_model',
# cluster info
'cluster_method',
'num_clusters',
'cluster_num',
'cluster_centroid',
'cluster_num_models',
# truncation info
'truncation_level',
'percent_truncation',
'truncation_method',
'truncation_pruning',
'truncation_variance',
'num_residues',
'pruned_residues',
# subclustering info
'subcluster_num_models',
'subcluster_radius_threshold',
'subcluster_centroid_model',
'subcluster_centroid_model_RMSD',
'subcluster_centroid_model_TM',
# ensemble info
# 'name',
'side_chain_treatment',
'ensemble_num_atoms',
# MR result info
# 'name',
'MR_program',
'Solution_Type',
'PHASER_LLG',
'PHASER_TFZ',
'PHASER_RFZ',
'PHASER_time',
'PHASER_killed',
'PHASER_version',
'PHASER_errors',
'MOLREP_score',
'MOLREP_time',
'MOLREP_version',
'MR_MPE',
'MR_wMPE',
'REFMAC_Rfact',
'REFMAC_Rfree',
# 'REFMAC_MPE',
# 'REFMAC_wMPE',
'REFMAC_version',
'BUCC_final_Rfact',
'BUCC_final_Rfree',
'BUCC_version',
'ARP_final_Rfact',
'ARP_final_Rfree',
'ARP_version',
'SHELXE_CC',
'SHELXE_ACL',
'SHELXE_MCL',
'SHELXE_NC',
'SHELXE_wPE',
'SHELXE_wMPE',
'SHELXE_os',
'SHELXE_time',
'SHELXE_version',
'SXRBUCC_version',
'SXRBUCC_final_Rfact',
'SXRBUCC_final_Rfree',
'SXRBUCC_MPE',
'SXRBUCC_wMPE',
'SXRARP_version',
'SXRARP_final_Rfact',
'SXRARP_final_Rfree',
'SXRARP_MPE',
'SXRARP_wMPE',
'num_placed_chains',
'num_placed_atoms',
'reforigin_RMSD',
'AA_num_contacts',
'RIO_num_contacts',
'RIO_in_register',
'RIO_oo_register',
'RIO_backwards',
'RIO',
'RIO_no_cat',
'RIO_norm',
]
def analyse(amoptd, newroot=None):
if newroot:
assert os.path.isdir(newroot)
global _oldroot, _newroot
_newroot = newroot
_oldroot = amoptd['work_dir']
if not os.path.isdir(fixpath(amoptd['benchmark_dir'])):
os.mkdir(fixpath(amoptd['benchmark_dir']))
os.chdir(fixpath(amoptd['benchmark_dir']))
# AnalysePdb may have already been called from the main script
if amoptd['native_pdb'] and 'native_pdb_std' not in amoptd:
analysePdb(amoptd)
if amoptd['native_pdb_std']:
# Generate an SHELXE HKL and ENT file so that we can calculate phase errors
mtz_util.to_hkl(amoptd['mtz'], hkl_file=os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".hkl"))
shutil.copyfile(amoptd['native_pdb_std'], os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".ent"))
if amoptd['native_pdb'] and not (
amoptd['homologs'] or amoptd['ideal_helices'] or amoptd['import_ensembles'] or amoptd['single_model_mode']
):
analyseModels(amoptd)
# Get the ensembling data
if 'ensembles_data' not in amoptd or not len(amoptd['ensembles_data']):
logger.critical("Benchmark cannot find any ensemble data!")
return
# Get dict of ensemble name -> ensemble result
ensemble_results = {e['name']: e for e in amoptd['ensembles_data']}
# Get mrbump_results for cluster
if 'mrbump_results' not in amoptd or not len(amoptd['mrbump_results']):
logger.critical("Benchmark cannot find any mrbump results!")
return
data = []
mrinfo = shelxe.MRinfo(amoptd['shelxe_exe'], amoptd['native_pdb_info'].pdb, amoptd['mtz'])
for result in amoptd['mrbump_results']:
# use mrbump dict as basis for result object
d = copy.copy(result)
# Add in the data from the ensemble
d.update(ensemble_results[d['ensemble_name']])
assert d['ensemble_name'] == d['name'], d
# Hack for old results
if 'truncation_num_residues' in d:
d['num_residues'] = d['truncation_num_residues']
del d['truncation_num_residues']
# Hack for ideal helices where num_residues are missing
if amoptd['ideal_helices'] and ('num_residues' not in d or d['num_residues'] is None):
d['num_residues'] = int(d['ensemble_name'].lstrip('polyala'))
# Get the ensemble data and add to the MRBUMP data
d['ensemble_percent_model'] = int((float(d['num_residues']) / float(amoptd['fasta_length'])) * 100)
if amoptd['native_pdb']:
# Add in stuff we've cleaned from the pdb
native_keys = [
'native_pdb_code',
'native_pdb_title',
'native_pdb_resolution',
'native_pdb_solvent_content',
'native_pdb_space_group',
'native_pdb_num_chains',
'native_pdb_num_atoms',
'native_pdb_num_residues',
]
d.update({key: amoptd[key] for key in native_keys})
# Analyse the solution
analyseSolution(amoptd, d, mrinfo)
data.append(d)
# Put everything in a pandas DataFrame
dframe = pd.DataFrame(data)
# General stuff
dframe['ample_version'] = amoptd['ample_version']
dframe['fasta_length'] = amoptd['fasta_length']
# Analyse subcluster centroid models
if 'subcluster_centroid_model' in dframe.columns and amoptd['native_pdb']:
centroid_index = dframe.index
centroid_models = [fixpath(f) for f in dframe.subcluster_centroid_model]
native_pdb_std = fixpath(amoptd['native_pdb_std'])
fasta = fixpath(amoptd['fasta'])
# Calculation of TMscores for subcluster centroid models
if amoptd['have_tmscore']:
tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']), **amoptd)
tm_results = tm.compare_structures(centroid_models, [native_pdb_std], [fasta])
centroid_tmscores = [r['tmscore'] for r in tm_results]
centroid_rmsds = [r['rmsd'] for r in tm_results]
else:
raise RuntimeError("No program to calculate tmscores!")
dframe['subcluster_centroid_model_TM'] = pd.Series(centroid_tmscores, index=centroid_index)
dframe['subcluster_centroid_model_RMSD'] = pd.Series(centroid_rmsds, index=centroid_index)
# Save the data
file_name = os.path.join(fixpath(amoptd['benchmark_dir']), 'results.csv')
dframe.to_csv(file_name, columns=_CSV_KEYLIST, index=False, na_rep="N/A")
amoptd['benchmark_results'] = dframe.to_dict('records')
return
def analyseModels(amoptd):
# Get hold of a full model so we can do the mapping of residues
refModelPdb = glob.glob(os.path.join(amoptd['models_dir'], "*.pdb"))[0]
nativePdbInfo = amoptd['native_pdb_info']
refModelPdbInfo = pdb_edit.get_info(refModelPdb)
amoptd['ref_model_pdb_info'] = refModelPdbInfo
try:
resSeqMap = residue_map.residueSequenceMap()
resSeqMap.fromInfo(
refInfo=refModelPdbInfo,
refChainID=refModelPdbInfo.models[0].chains[0], # Only 1 chain in model
targetInfo=nativePdbInfo,
targetChainID=nativePdbInfo.models[0].chains[0],
)
amoptd['res_seq_map'] = resSeqMap
except Exception as e:
logger.exception("Error calculating resSeqMap: %s" % e)
amoptd['res_seq_map'] = None # Won't be able to calculate RIO scores
if amoptd['have_tmscore']:
try:
tm = tm_util.TMscore(amoptd['tmscore_exe'], wdir=fixpath(amoptd['benchmark_dir']))
# Calculation of TMscores for all models
logger.info("Analysing Rosetta models with TMscore")
model_list = sorted(glob.glob(os.path.join(amoptd['models_dir'], "*pdb")))
structure_list = [amoptd['native_pdb_std']]
amoptd['tmComp'] = tm.compare_structures(model_list, structure_list, fastas=[amoptd['fasta']])
except Exception as e:
logger.exception("Unable to run TMscores: %s", e)
else:
raise RuntimeError("No program to calculate TMSCORES")
def analysePdb(amoptd):
"""Collect data on the native pdb structure"""
nativePdb = fixpath(amoptd['native_pdb'])
nativePdbInfo = pdb_edit.get_info(nativePdb)
# number atoms/residues
natoms, nresidues = pdb_edit.num_atoms_and_residues(nativePdb)
# Get information on the origins for this spaceGroup
try:
originInfo = pdb_model.OriginInfo(spaceGroupLabel=nativePdbInfo.crystalInfo.spaceGroup)
except Exception:
originInfo = None
# Do this here as a bug in pdbcur can knacker the CRYST1 data
amoptd['native_pdb_code'] = nativePdbInfo.pdbCode
amoptd['native_pdb_title'] = nativePdbInfo.title
amoptd['native_pdb_resolution'] = nativePdbInfo.resolution
amoptd['native_pdb_solvent_content'] = nativePdbInfo.solventContent
amoptd['native_pdb_matthews_coefficient'] = nativePdbInfo.matthewsCoefficient
if not originInfo:
space_group = "P1"
else:
space_group = originInfo.spaceGroup()
amoptd['native_pdb_space_group'] = space_group
amoptd['native_pdb_num_atoms'] = natoms
amoptd['native_pdb_num_residues'] = nresidues
# First check if the native has > 1 model and extract the first if so
if len(nativePdbInfo.models) > 1:
logger.info("nativePdb has > 1 model - using first")
nativePdb1 = ample_util.filename_append(
filename=nativePdb, astr="model1", directory=fixpath(amoptd['work_dir'])
)
pdb_edit.extract_model(nativePdb, nativePdb1, modelID=nativePdbInfo.models[0].serial)
nativePdb = nativePdb1
# Standardise the PDB to rename any non-standard AA, remove solvent etc
nativePdbStd = ample_util.filename_append(filename=nativePdb, astr="std", directory=fixpath(amoptd['work_dir']))
pdb_edit.standardise(nativePdb, nativePdbStd, del_hetatm=True)
nativePdb = nativePdbStd
# Get the new Info about the native
nativePdbInfo = pdb_edit.get_info(nativePdb)
# For comparison of the shelxe model we need a single chain from the native so we get this here
if len(nativePdbInfo.models[0].chains) > 1:
nativeChain1 = ample_util.filename_append(
filename=nativePdbInfo.pdb, astr="chain1", directory=fixpath(amoptd['work_dir'])
)
pdb_edit.merge_chains(nativePdbInfo.pdb, nativeChain1)
else:
nativeChain1 = nativePdbInfo.pdb
# Additional data
amoptd['native_pdb_num_chains'] = len(nativePdbInfo.models[0].chains)
amoptd['native_pdb_info'] = nativePdbInfo
amoptd['native_pdb_std'] = nativePdbStd
amoptd['native_pdb_1chain'] = nativeChain1
amoptd['native_pdb_origin_info'] = originInfo
return
def analyseSolution(amoptd, d, mrinfo):
logger.info("Benchmark: analysing result: {0}".format(d['ensemble_name']))
mrPdb = None
if d['MR_program'] == "PHASER":
mrPdb = d['PHASER_pdbout']
mrMTZ = d['PHASER_mtzout']
elif d['MR_program'] == "MOLREP":
mrPdb = d['MOLREP_pdbout']
elif d['MR_program'] == "unknown":
return
if mrPdb is None or not os.path.isfile(mrPdb):
# logger.critical("Cannot find mrPdb {0} for solution {1}".format(mrPdb,d))
return
# debug - copy into work directory as reforigin struggles with long pathnames
shutil.copy(mrPdb, os.path.join(fixpath(amoptd['benchmark_dir']), os.path.basename(mrPdb)))
mrPdbInfo = pdb_edit.get_info(mrPdb)
d['num_placed_chains'] = mrPdbInfo.numChains()
d['num_placed_atoms'] = mrPdbInfo.numAtoms()
d['num_placed_CA'] = mrPdbInfo.numCalpha()
if amoptd['native_pdb']:
if not d['SHELXE_os']:
logger.critical("mrPdb {0} has no SHELXE_os origin shift. Calculating...".format(mrPdb))
mrinfo.analyse(mrPdb)
mrOrigin = mrinfo.originShift
d['SHELXE_MPE'] = mrinfo.MPE
d['SHELXE_wMPE'] = mrinfo.wMPE
else:
mrOrigin = [c * -1 for c in d['SHELXE_os']]
# Move pdb onto new origin
originPdb = ample_util.filename_append(mrPdb, astr='offset', directory=fixpath(amoptd['benchmark_dir']))
pdb_edit.translate(mrPdb, originPdb, mrOrigin)
        # offset.pdb is the mrModel shifted onto the new origin; use csymmatch to wrap it onto the native
csymmatch.Csymmatch().wrapModelToNative(
originPdb,
amoptd['native_pdb'],
csymmatchPdb=os.path.join(
fixpath(amoptd['benchmark_dir']), "phaser_{0}_csymmatch.pdb".format(d['ensemble_name'])
),
)
# can now delete origin pdb
os.unlink(originPdb)
# Calculate phase error for the MR PDB
try:
mrinfo.analyse(mrPdb)
d['MR_MPE'] = mrinfo.MPE
d['MR_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing mrPdb: {0}\n{1}".format(mrPdb, e))
# We cannot calculate the Reforigin RMSDs or RIO scores for runs where we don't have a full initial model
# to compare to the native to allow us to determine which parts of the ensemble correspond to which parts of
# the native structure - or if we were unable to calculate a res_seq_map
if not (
amoptd['homologs']
or amoptd['ideal_helices']
or amoptd['import_ensembles']
or amoptd['single_model_mode']
or amoptd['res_seq_map']
):
# Get reforigin info
rmsder = reforigin.ReforiginRmsd()
try:
rmsder.getRmsd(
nativePdbInfo=amoptd['native_pdb_info'],
placedPdbInfo=mrPdbInfo,
refModelPdbInfo=amoptd['ref_model_pdb_info'],
cAlphaOnly=True,
workdir=fixpath(amoptd['benchmark_dir']),
)
d['reforigin_RMSD'] = rmsder.rmsd
except Exception as e:
logger.critical("Error calculating RMSD: {0}".format(e))
d['reforigin_RMSD'] = 999
# Score the origin with all-atom and rio
rioData = rio.Rio().scoreOrigin(
mrOrigin,
mrPdbInfo=mrPdbInfo,
nativePdbInfo=amoptd['native_pdb_info'],
resSeqMap=amoptd['res_seq_map'],
workdir=fixpath(amoptd['benchmark_dir']),
)
# Set attributes
d['AA_num_contacts'] = rioData.aaNumContacts
d['RIO_num_contacts'] = rioData.rioNumContacts
d['RIO_in_register'] = rioData.rioInRegister
d['RIO_oo_register'] = rioData.rioOoRegister
d['RIO_backwards'] = rioData.rioBackwards
d['RIO'] = rioData.rioInRegister + rioData.rioOoRegister
d['RIO_no_cat'] = rioData.rioNumContacts - (rioData.rioInRegister + rioData.rioOoRegister)
d['RIO_norm'] = float(d['RIO']) / float(d['native_pdb_num_residues'])
else:
d['AA_num_contacts'] = None
d['RIO_num_contacts'] = None
d['RIO_in_register'] = None
d['RIO_oo_register'] = None
d['RIO_backwards'] = None
d['RIO'] = None
d['RIO_no_cat'] = None
d['RIO_norm'] = None
# # Now get the helix
# helixSequence = contacts.Rio().helixFromContacts( contacts=rioData.contacts,
# dsspLog=dsspLog )
# if helixSequence is not None:
# ampleResult.rioHelixSequence = helixSequence
# ampleResult.rioLenHelix = len( helixSequence )
# hfile = os.path.join( workdir, "{0}.helix".format( ampleResult.ensembleName ) )
# with open( hfile, 'w' ) as f:
# f.write( helixSequence+"\n" )
#
    # This is purely for checking and so we have pdbs to view
#
# Wrap shelxe trace onto native using Csymmatch
if not d['SHELXE_pdbout'] is None and os.path.isfile(fixpath(d['SHELXE_pdbout'])):
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SHELXE_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
workdir=fixpath(amoptd['benchmark_dir']),
)
if not ('SHELXE_wMPE' in d and d['SHELXE_wMPE']):
try:
mrinfo.analyse(d['SHELXE_pdbout'])
d['SHELXE_MPE'] = mrinfo.MPE
d['SHELXE_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SHELXE_pdbout: {0}\n{1}".format(d['SHELXE_pdbout'], e))
# Wrap parse_buccaneer model onto native
if d['SXRBUCC_pdbout'] and os.path.isfile(fixpath(d['SXRBUCC_pdbout'])):
        # Need to rename the PDB as it is just called buccSX_output.pdb
csymmatchPdb = os.path.join(
fixpath(amoptd['benchmark_dir']), "buccaneer_{0}_csymmatch.pdb".format(d['ensemble_name'])
)
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SXRBUCC_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
csymmatchPdb=csymmatchPdb,
workdir=fixpath(amoptd['benchmark_dir']),
)
# Calculate phase error
try:
mrinfo.analyse(d['SXRBUCC_pdbout'])
d['SXRBUCC_MPE'] = mrinfo.MPE
d['SXRBUCC_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SXRBUCC_pdbout: {0}\n{1}".format(d['SXRBUCC_pdbout'], e))
    # Wrap the arpwarp model onto native
if d['SXRARP_pdbout'] and os.path.isfile(fixpath(d['SXRARP_pdbout'])):
        # Need to rename the PDB as it is just called buccSX_output.pdb
csymmatchPdb = os.path.join(
fixpath(amoptd['benchmark_dir']), "arpwarp_{0}_csymmatch.pdb".format(d['ensemble_name'])
)
csymmatch.Csymmatch().wrapModelToNative(
fixpath(d['SXRARP_pdbout']),
amoptd['native_pdb'],
origin=mrOrigin,
csymmatchPdb=csymmatchPdb,
workdir=fixpath(amoptd['benchmark_dir']),
)
# Calculate phase error
try:
mrinfo.analyse(d['SXRARP_pdbout'])
d['SXRARP_MPE'] = mrinfo.MPE
d['SXRARP_wMPE'] = mrinfo.wMPE
except Exception as e:
logger.critical("Error analysing SXRARP_pdbout: {0}\n{1}".format(d['SXRARP_pdbout'], e))
return
def cluster_script(amoptd, python_path="ccp4-python"):
"""Create the script for benchmarking on a cluster"""
# write out script
work_dir = amoptd['work_dir']
script_path = os.path.join(work_dir, "submit_benchmark.sh")
with open(script_path, "w") as job_script:
job_script.write("#!/bin/sh\n")
# Find path to this directory to get path to python ensemble.py script
pydir = os.path.abspath(os.path.dirname(__file__))
benchmark_script = os.path.join(pydir, "benchmark_util.py")
job_script.write("{0} {1} {2} {3}\n".format(python_path, "-u", benchmark_script, amoptd['results_path']))
# Make executable
os.chmod(script_path, 0o777)
return script_path
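# (Added note, illustrative only.) The generated submit_benchmark.sh is a
# minimal wrapper of the form below, where <python_path> defaults to
# ccp4-python and <results_path> comes from the amopt dictionary:
#
#   #!/bin/sh
#   <python_path> -u <this_directory>/benchmark_util.py <results_path>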
def fixpath(path):
# fix for analysing on a different machine
if _oldroot and _newroot:
return path.replace(_oldroot, _newroot)
else:
return path
# Run unit tests
if __name__ == "__main__":
# Set up logging - could append to an existing log?
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# This runs the benchmarking starting from a pickled file containing an amopt dictionary.
# - used when submitting the modelling jobs to a cluster
if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
logging.debug("benchmark script requires the path to a pickled amopt dictionary!")
sys.exit(1)
# Get the amopt dictionary
amoptd = ample_util.read_amoptd(sys.argv[1])
fl = logging.FileHandler(os.path.join(amoptd['work_dir'], "benchmark.log"))
fl.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fl.setFormatter(formatter)
logger.addHandler(fl)
analyse(amoptd)
ample_util.save_amoptd(amoptd)
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
    n_components : int, (default 2).
        Number of components to keep.
    scale : boolean, (default True)
        Whether to scale the data.
    max_iter : an integer, (default 500)
        The maximum number of iterations of the NIPALS inner loop.
    tol : non-negative real, default 1e-06.
        The tolerance used in the iterative algorithm.
    copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French, but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
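# (Added illustration, not part of the original scikit-learn source.) As the
# Notes above say, CCA maximises the correlation between the X and Y scores;
# that correlation can be read off the transformed data, e.g. with numpy.
# This assumes the toy X, Y and the fitted `cca` from the class docstring:
#
#   >>> import numpy as np
#   >>> X_c, Y_c = cca.transform(X, Y)
#   >>> np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1]  # first canonical correlation
#   # close to 1 for this nearly linear toy data (exact value not asserted)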
| bsd-3-clause |
gustavla/dotfiles | ipython_config.py | 1 | 19767 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = 'default'
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = ''
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vim'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
#
# c.TerminalInteractiveShell.history_length = 10000
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#
# c.TerminalInteractiveShell.separate_out = ''
#
# c.TerminalInteractiveShell.separate_in = '\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.debug = False
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 3.4.1 (default, Jul 30 2014, 17:10:01) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.2.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_use = True
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.quiet = False
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.separate_out2 = ''
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Continuation prompt.
c.PromptManager.in2_template = '{color.DarkGray}... '
# Input prompt. '\#' will be transformed to the prompt number
c.PromptManager.in_template = '{color.LightBlue}>>> '
#
# c.PromptManager.color_scheme = 'Linux'
# Output prompt. '\#' will be transformed to the prompt number
c.PromptManager.out_template = ''
# If True (default), each prompt will be right-aligned with the preceding one.
c.PromptManager.justify = False
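# (Added note.) With the three prompt templates above, a session looks roughly
# like this (sketch only; the configured colours depend on the terminal):
#
#   >>> 1 + 1
#   2
#   >>> for i in range(2):
#   ...     print(i)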
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = ''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.singleton_printers = {}
#
# c.PlainTextFormatter.max_width = 79
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| bsd-3-clause |
surligas/cs436-gnuradio | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
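# (Added comment.) Flowgraph summary for example_costas.__init__ above:
#   vector_source -> RRC interpolating filter -> channel model -> Costas loop
# Port 0 of the Costas loop feeds the corrected symbols to vsnk_cst; port 1,
# as wired to vsnk_frq here, carries the loop's frequency output (labelled
# "Costas LO" in the plots), which main() below divides by 2*pi to get a
# normalized frequency.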
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from radians/sample to normalized Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mdpiper/dakota-experiments | experiments/beaver-creek-lsq/long_profile.py | 1 | 8531 | #! /usr/bin/env python
from __future__ import print_function
import argparse
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def str2num(s):
try:
return int(s)
except ValueError:
return float(s)
class Dakota(object):
FLOAT_REGEX = '[-+]?[0-9]*\.?[0-9]*([eE][-+]?[0-9]+)?'
KEY_REGEX = '(?P<key>\w+)'
VALUE_REGEX = '(?P<value>' + FLOAT_REGEX + ')'
@staticmethod
def read_params(filename):
pattern = re.compile('\s*' + Dakota.VALUE_REGEX + '\s+' +
Dakota.KEY_REGEX)
params = {}
with open(filename, 'r') as fp:
for line in fp:
m = pattern.match(line)
if m is not None:
params[m.group('key')] = str2num(m.group('value'))
return params
@staticmethod
def read_aprepro(filename):
pattern = re.compile('\s*\{\s+' + Dakota.KEY_REGEX + '\s+=\s+' +
Dakota.VALUE_REGEX + '\s+\}')
params = {}
with open(filename, 'r') as fp:
for line in fp:
m = pattern.match(line)
if m is not None:
params[m.group('key')] = str2num(m.group('value'))
return params
@staticmethod
def print_gradients(fp, grads):
for items in zip(*grads):
format_str = '[ ' + ' '.join(['%f'] * len(items)) + ' ]'
print(format_str % items, file=fp)
@staticmethod
def print_hessians(fp, hessians):
for items in zip(*hessians):
format_str = '[[ ' + ' '.join(['%f'] * len(items)) + ' ]]'
print(format_str % items, file=fp)
@staticmethod
def print_results(filename, x, gradients=None, hessians=None):
gradients = gradients or ([], )
hessians = hessians or ([], )
np.savetxt(filename, x)
with open(filename, 'a+') as fp:
Dakota.print_gradients(fp, gradients)
Dakota.print_hessians(fp, hessians)
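# (Added illustration; the values below are made up.) Dakota.read_params
# expects the "<value> <key>" lines of a Dakota parameters file, e.g.
#
#                               2 variables
#           4.250000000000000e+01 c
#           6.100000000000000e-02 p
#
# for which read_params() would return {'variables': 2, 'c': 42.5, 'p': 0.061}.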
def main():
parser = argparse.ArgumentParser()
parser.add_argument('params', type=str, nargs='?', help='Dakota parameters file')
parser.add_argument('results', type=str, nargs='?',
help='Dakota results file')
parser.add_argument('--model', choices=('power', 'log', 'peckham'),
default='power',
help='Model used to calculate longitudinal profile')
parser.add_argument('--data', type=str,
default='beaver_creek.npy',
#default='beaver_channel_profile.csv',
help='Data file containing profile elevations')
args = parser.parse_args()
if args.params:
params = Dakota.read_params(args.params)
else:
params = {}
x, z = measured_elevations_from_file(args.data)
params['x0'] = x[0]
params['z0'] = z[0]
if args.model == 'power':
model = PowerLawModel(params=params)
elif args.model == 'log':
model = LogModel(params=params)
else:
model = PeckhamModel(params=params)
if args.results:
Dakota.print_results(args.results, model.residual(x, z),
gradients=model.gradients(x))
else:
model.plot(x, z)
def sum_of_squares(y, f):
return np.sum(np.power(y - f, 2.))
def r_squared(y, f):
return 1. - sum_of_squares(y, f) / sum_of_squares(y, y.mean())
def measured_elevations_from_file(filename):
(x, z) = np.load(filename)
return (x, z)
#data = np.loadtxt(filename)
#return data[:, 0] * 1000., data[:, 1]
class ChannelProfileModel(object):
def __init__(self, params=None):
self._params = params or {}
self._x0 = params.get('x0')
self._z0 = params.get('z0')
def eval(self, x):
raise NotImplementedError('eval')
def residual(self, x, z):
return z - self.eval(x)
def gradients(self, x):
return (self._grad_wrt_c(x), self._grad_wrt_p(x))
def _grad_wrt_c(self, x):
return []
def _grad_wrt_p(self, x):
return []
def plot(self, x, z):
bbox_props = dict(boxstyle='square,pad=.5', fc='none')
sns.set_style('whitegrid')
plt.plot(x / 1000., z)
plt.plot(x / 1000., self.eval(x))
annotation = '\n'.join(['R^2 = %f' % r_squared(z, self.eval(x)),
self.text_summary()])
plt.annotate(annotation, xy=(.05, .95),
xycoords='axes fraction', ha='left', va='top',
bbox=bbox_props)
plt.title('Distance (km) vs elevation (m) for main channel profile of '
'Beaver Creek, KY.')
plt.show()
def text_summary(self):
text = []
for item in self._params.items():
text.append('%s = %f' % item)
return '\n'.join(text)
class PowerLawModel(ChannelProfileModel):
def __init__(self, params=None):
super(PowerLawModel, self).__init__(params=params)
# newton
#self._params.setdefault('c', 2.1784678105e+01)
#self._params.setdefault('p', 1.4312563604e-01)
# global
#self._params.setdefault('c', 4.1460189615e+01)
#self._params.setdefault('p', 5.4463636358e-02)
# local
#self._params.setdefault('c', 6.1090204531e+01)
#self._params.setdefault('p', 1.0056306635e-03)
self._params.setdefault('c', 3.9999968015e+01)
self._params.setdefault('p', 6.1132405380e-02)
def eval(self, x):
c, p, x0 = self._params['c'], self._params['p'], self._params['x0']
return self._z0 - (c / p) * (np.power(x, p) - np.power(x0, p))
def _grad_wrt_c(self, x):
p, x0 = self._params['p'], self._params['x0']
return (- 1. / p) * (np.power(x, p) - np.power(x0, p))
def _grad_wrt_p(self, x):
c, p, x0 = self._params['c'], self._params['p'], self._params['x0']
return (c / p ** 2.) * (
- np.power(x, p) + p * np.power(x, p) * np.log(x) +
np.power(x0, p) - p * np.power(x0, p) * np.log(x0))
def __str__(self):
return '$f(p,x) = (1/p) \, x^p$'
class LogModel(ChannelProfileModel):
def __init__(self, params=None):
super(LogModel, self).__init__(params=params)
# newton
self._params.setdefault('c', 2.0785632989e+02)
self._params.setdefault('p', 6.0921199008e-01)
# local
#self._params.setdefault('c', 1.7369029258e+02)
#self._params.setdefault('p', 6.6198835493e-01)
# global
#self._params.setdefault('c', 2.5405015305e+02)
#self._params.setdefault('p', 5.5275361485e-01)
def eval(self, x):
c, p, x0 = self._params['c'], self._params['p'], self._params['x0']
return self._z0 - c * (np.log(x) ** p - np.log(x0) ** p)
def _grad_wrt_c(self, x):
p, x0 = self._params['p'], self._params['x0']
return - (np.log(x) ** p - np.log(x0) ** p)
def _grad_wrt_p(self, x):
c, p, x0 = self._params['c'], self._params['p'], self._params['x0']
return - c * (np.log(np.log(x)) * np.power(np.log(x), p) -
np.log(np.log(x0)) * np.power(np.log(x0), p))
def __str__(self):
return '$f(p,x) = \log^p(x)$'
class PeckhamModel(ChannelProfileModel):
def __init__(self, params=None):
super(PeckhamModel, self).__init__(params=params)
self._params.setdefault('gamma', -7.6991826046e-01)
self._params.setdefault('r', 5.2248736972e-03)
self._params.setdefault('s0', 6.7005230518e-01)
self._params.setdefault('x0', 0.)
self._params.setdefault('z0', 668.33)
def eval(self, x):
z0, x0, s0 = self._params['z0'], self._params['x0'], self._params['s0']
r_star, gamma = self._params['r'], self._params['gamma']
p_gamma = (gamma + 1.) / gamma
return z0 + (1. / (p_gamma * r_star)) * (
np.power(s0, gamma + 1.) - np.power(np.power(s0, gamma) +
r_star * (x - x0), p_gamma)
)
def gradients(self, x):
return ([], [], [])
def _grad_wrt_s0(self, x):
        raise NotImplementedError('grad_wrt_s0')
def _grad_wrt_gamma(self, x):
        raise NotImplementedError('grad_wrt_gamma')
def _grad_wrt_r(self, x):
        raise NotImplementedError('grad_wrt_r')
def __str__(self):
return '$f(x) = x$'
if __name__ == '__main__':
main()
| mit |
WMD-group/effectivemasstheory | examples/pbs.py | 2 | 6662 | #! /usr/bin/env python
"""Calculate simple semiconductor properties from effective mass theory"""
################################################################################
# Aron Walsh 2014 #
################################################################################
import math as m
import scipy.constants as sc
from numpy import linspace
#import matplotlib.pyplot as plt
from optparse import OptionParser
######################## Set up optional arguments #############################
parser = OptionParser()
parser.add_option("-c", "--electron-effective-mass",
action="store", type="float", dest="e", default=0.1,
help="Average electron (conduction band) effective mass")
parser.add_option("-v", "--hole-effective-mass",
action="store", type="float", dest="h", default=0.1,
help="Average hole (valence band) effective mass")
parser.add_option("-s", "--static-dielectric",
action="store", type="float", dest="d0", default=170,
help="Static (low-frequency) dielectric constant")
parser.add_option("-o", "--optical-dielectric",
action="store", type="float", dest="d1", default=17.2,
help="Optical (high-frequency) dielectric constant")
#########################defaults for CH3NH3PbI3################################
#
#parser.add_option("-p", "--optical-phonon",
# action="store", type="float", dest="lo", default=9.3,
# help="Optical (polaron active) phonon in THz")
### Further options go here ###
(options,args) = parser.parse_args()
########################### Begin main program #################################
print "*A program for semiconductor properties from effective mass theory* \n"
# See, e.g. Fundamentals of Semiconductors, Yu and Cardona
print "Aron Walsh (University of Bath) \nDate last edited: 22/11/2014 \n"
# Get electron effective mass
if options.e ==0:
e = raw_input("What is the electron effective mass (e.g. 0.3 me)?")
e = float(e)
else:
e = options.e
# Get hole effective mass
if options.h ==0:
h = raw_input("What is the hole effective mass (e.g. 0.3 me)?")
h = float(h)
else:
h = options.h
# Get static (low frequency) dielectric constant
if options.d0 ==0:
d0 = raw_input("What is the static dielectric constant (e.g. 10)?")
d0 = float(d0)
else:
d0 = options.d0
# Get optical (high frequency) dielectric constant
if options.d1 ==0:
d1 = raw_input("What is the optical dielectric constant (e.g. 5)?")
d1 = float(d1)
else:
d1 = options.d1
# Get optical phonon frequency
#if options.lo ==0:
# lo = raw_input("What is the optical phonon frequency (e.g. 1 THz)?")
# lo = float(lo)
#else:
# lo = options.lo
#
# Calculate properties
#
# Reduced effective mass
mass=((e*h)/(e+h))
diel=(1/d1-1/d0)
print ("*Effective mass \nHole mass: " + str(h) + " me")
print ("Electron mass: " + str(e) + " me")
print ("Reduced mass: %3.2f me\n" % (mass))
# Exciton Bohr radius
radius_bohr=(d0/mass)
radius_bohr_h=(d0/h)
radius_bohr_e=(d0/e)
radius=(d0/mass)*0.529177249
radius_h=(d0/h)*0.529177249
radius_e=(d0/e)*0.529177249
print ("*Shallow defects \nAcceptor radius: %3.2f A (%3.2f nm)" %(radius_h, radius_h/10))
print ("Donor radius: %3.2f A (%3.2f nm)\n" %(radius_e, radius_e/10))
# (Static) Exciton binding energy
binding=((1/(d0*radius_bohr))*(13.605698066*1000))
print ("*Mott-Wannier analysis \nThermal exciton radius: %3.2f A" %(radius))
print ("Thermal exciton binding energy: %3.2f meV" %(binding))
# (Optical) Exciton binding energy
radius_bohr_o=(d1/mass)
radius_o=(d1/mass)*0.529177249
binding_o_ryd=1/(d1*radius_bohr_o)
binding_o=binding_o_ryd*13.605698066*1000
print ("\nOptical exciton radius: %3.2f A" %(radius_o))
print ("Optical exciton binding energy: %3.2f meV" %(binding_o))
# Carrier polaron radius
# From Mott (1968)
radius_bh=(2/(h*diel))*0.529177249
print ("\nHole (band) polaron radius: %3.2f A" %(radius_bh))
radius_be=(2/(e*diel))*0.529177249
print ("Electron (band) polaron radius: %3.2f A\n" %(radius_be))
# Quantum dot properties
print ("*Quantum dots")
confine=radius_o*(sc.pi*sc.pi)/3.6
print ("Confinement radius: %3.0f nm" %(confine/10))
radius_qd=2 #nm
radius_qd_bohr=radius_qd*18.8971616463
#change in band gap (spherical confinement + coulomb attraction + rydberg correction)
delta_e_ryd=(sc.pi*sc.pi)/(2*mass*radius_qd_bohr*radius_qd_bohr)-(1.786/(d1*radius_qd_bohr))-(0.248*binding_o_ryd)
delta_e=delta_e_ryd*13.605698066*1000
print ("r=2nm optical gap enhancement: %3.0f meV \n" %(delta_e))
#
# AW: Should fix this at some stage
#
# Frohlich (lage polaron) properties
# Speed of light in atomic units
# c=1/sc.alpha
# LO frequency (from THz -> Ry)
# freq=lo*0.0003039659692
# Small polaron coupling constant
# h_alpha=diel*m.sqrt(h/(2*freq))
# e_alpha=diel*m.sqrt(e/(2*freq))
# Small polaron mass (Feynman)
# h_pol=h*(1+h_alpha/6)
# h_pol=h*((1-0.0008*h_alpha*h_alpha)/(1-h_alpha/6+0.0034*h_alpha*h_alpha))
# radius_bhp=(2/(h_pol*diel))*0.529177249
# e_pol=e*(1+e_alpha/6)
# e_pol=e*((1-0.0008*e_alpha*e_alpha)/(1-e_alpha/6+0.0034*e_alpha*e_alpha))
# radius_bep=(2/(e_pol*diel))*0.529177249
# print ("*Hole Polarons \nFrohlich coupling constant: " + str(h_alpha))
# print ("Effective polaron mass: " + str(h_pol) + " me")
# print ("Polaron radius: " + str(radius_bhp) + " A \n")
# print ("*Electron Polarons \nFrohlich coupling constant: " + str(e_alpha))
# print ("Effective polaron mass: " + str(e_pol) + " me")
# print ("Polaron radius: " + str(radius_bep) + " A \n")
# Mott transition
# Exciton transition ~ 1/exciton volume (Optical properties of Solids - Mark Fox)
mott=((1/(4/3*sc.pi*(radius_bohr**3)))*(188971616.463**3))
print ("*Mott criterion (critical concentrations) \nExciton: %3.0e cm-3" %(mott))
# Mott transition (holes)
mott=(((0.26/radius_bohr_h)**3)*(188971616.463**3))
print ("Holes: %3.0e cm-3" %(mott))
# Mott transition (electrons)
mott=(((0.26/radius_bohr_e)**3)*(188971616.463**3))
print ("Electrons: %3.0e cm-3" %(mott))
# Note that the value of 0.26 for the Mott Criterion is taken from:
# "Universality aspects of the metal-nonmetal transition in condensed media"
# Edwards and Sienko, PRB 17, 2575 (1978) | gpl-2.0 |
elijah513/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
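# (Added usage sketch, not part of the original module.) The LSA workflow
# described in the class docstring, applied to a tf-idf matrix; TfidfVectorizer
# and Normalizer are standard scikit-learn classes, and no output values are
# asserted here:
#
#   >>> from sklearn.feature_extraction.text import TfidfVectorizer
#   >>> from sklearn.preprocessing import Normalizer
#   >>> docs = ["the cat sat", "the dog sat", "cats and dogs played"]
#   >>> tfidf = TfidfVectorizer().fit_transform(docs)
#   >>> lsa = TruncatedSVD(n_components=2, random_state=0).fit(tfidf)
#   >>> doc_topic = Normalizer(copy=False).fit_transform(lsa.transform(tfidf))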
| bsd-3-clause |
ronny3050/MobileNet | retrain.py | 1 | 56996 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple transfer learning with Inception v3 or Mobilenet models.
With support for TensorBoard.
This example shows how to take a Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
from sklearn.model_selection import train_test_split
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
  Analyzes the sub folders in the image directory, splits their images into
  training and validation sets (the testing set is left empty by the
  train_test_split call below), and returns a data structure describing the
  lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
          'WARNING: Folder has fewer than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
file_names = [os.path.basename(file_name) for file_name in file_list]
training_images, validation_images = train_test_split(file_names,
test_size=validation_percentage/100.0)
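    # Note: train_test_split above only fills the training and validation
    # lists; the 'testing' list stays empty, so run with --testing_percentage 0
    # unless a separate test split is added.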
## for file_name in file_list:
## base_name = os.path.basename(file_name)
## # We want to ignore anything after '_nohash_' in the file name when
## # deciding which set to put an image in, the data set creator has a way of
## # grouping photos that are close variations of each other. For example
## # this is used in the plant disease data set to group multiple pictures of
## # the same leaf.
## hash_name = re.sub(r'_nohash_.*$', '', file_name)
## # This looks a bit magical, but we need to decide whether this file should
## # go into the training, testing, or validation sets, and we want to keep
## # existing files in the same set even if more files are subsequently
## # added.
## # To do that, we need a stable way of deciding based on just the file name
## # itself, so we do a hash of that and then use that to generate a
## # probability value that we use to assign it.
## hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
## percentage_hash = ((int(hash_name_hashed, 16) %
## (MAX_NUM_IMAGES_PER_CLASS + 1)) *
## (100.0 / MAX_NUM_IMAGES_PER_CLASS))
## if percentage_hash < validation_percentage:
## validation_images.append(base_name)
#### elif percentage_hash < (testing_percentage + validation_percentage):
#### testing_images.append(base_name)
## else:
## training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, architecture):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
architecture: The name of the model architecture.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + architecture + '.txt'
def create_model_graph(model_info):
""""Creates a graph from saved GraphDef file and returns a Graph object.
Args:
model_info: Dictionary containing information about the model architecture.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
graph_def,
name='',
return_elements=[
model_info['bottleneck_tensor_name'],
model_info['resized_input_tensor_name'],
]))
return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract(data_url):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
Args:
data_url: Web location of the tar file containing the pretrained model.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = data_url.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
    tf.logging.info('Successfully downloaded %s, %d bytes.',
                    filename, statinfo.st_size)
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.info('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
architecture: The name of the model architecture.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, architecture)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
architecture: The name of the model architecture.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, architecture):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
architecture: The name of the model architecture.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, architecture)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck_values)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, input_width, input_height,
input_depth, input_mean, input_std):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
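  As a concrete illustration, with random_crop=25 and random_scale=10 the code
  below computes margin_scale = 1.25 and draws resize_scale_value uniformly
  from [1.0, 1.1], so the image is resized to between 1.25 and 1.375 times the
  model input size before an input-sized crop is taken at a random position.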
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
input_width: Horizontal size of expected input image to model.
input_height: Vertical size of expected input image to model.
input_depth: How many channels the expected input image should have.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
offset_image = tf.subtract(brightened_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def get_center_loss(features, labels, alpha, num_classes):
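  """Computes a center loss term for the given features (cf. Wen et al., 2016).
  A non-trainable variable keeps one running center per class. Each step the
  center of every class present in the batch is shifted toward the
  corresponding features by (1 - alpha) times their difference, and the
  returned loss is the mean squared distance between each feature and its
  class center.
  Args:
    features: Float tensor of bottleneck features, shape [batch, num_features].
    labels: One-hot ground truth tensor, shape [batch, num_classes].
    alpha: Center update rate; values closer to 1 move the centers more slowly.
    num_classes: Total number of classes.
  Returns:
    A scalar center-loss tensor and the centers variable.
  """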
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [num_classes, nrof_features],
dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
labels = tf.argmax(labels,1)
label = tf.reshape(labels, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alpha) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
with tf.control_dependencies([centers]):
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
def add_final_training_ops(class_count, labels, final_tensor_name, bottleneck_tensor,
bottleneck_tensor_size):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
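    labels: Numerical label indices for the classes (currently unused here).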
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
bottleneck_tensor_size: How many entries in the bottleneck vector.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[None, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.int32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('center_loss'):
center_loss, centers = get_center_loss(bottleneck_input,
ground_truth_input,
FLAGS.center_loss_alpha,
class_count)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
total_loss = cross_entropy_mean + FLAGS.center_loss_factor * center_loss
tf.summary.scalar('center_loss', center_loss)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
tf.summary.scalar('total_loss', total_loss)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(total_loss)
return (train_step, total_loss, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
return
def prepare_file_system():
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def create_model_info(architecture):
"""Given the name of a model architecture, returns information about it.
There are different base image recognition pretrained models that can be
retrained using transfer learning, and this function translates from the name
of a model to the attributes that are needed to download and train with it.
Args:
architecture: Name of a model architecture.
Returns:
Dictionary of information about the model, or None if the name isn't
recognized
Raises:
ValueError: If architecture name is unknown.
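  For example, 'mobilenet_1.0_224' selects the full-width MobileNet with
  224x224 inputs, while 'mobilenet_0.50_192_quantized' selects the quantized
  half-width variant with 192x192 inputs.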
"""
architecture = architecture.lower()
if architecture == 'inception_v3':
# pylint: disable=line-too-long
data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
bottleneck_tensor_name = 'pool_3/_reshape:0'
bottleneck_tensor_size = 2048
input_width = 299
input_height = 299
input_depth = 3
resized_input_tensor_name = 'Mul:0'
model_file_name = 'classify_image_graph_def.pb'
input_mean = 128
input_std = 128
elif architecture.startswith('mobilenet_'):
parts = architecture.split('_')
if len(parts) != 3 and len(parts) != 4:
tf.logging.error("Couldn't understand architecture name '%s'",
architecture)
return None
version_string = parts[1]
if (version_string != '1.0' and version_string != '0.75' and
version_string != '0.50' and version_string != '0.25'):
tf.logging.error(
""""The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',
but found '%s' for architecture '%s'""",
version_string, architecture)
return None
size_string = parts[2]
if (size_string != '224' and size_string != '192' and
size_string != '160' and size_string != '128'):
tf.logging.error(
"""The Mobilenet input size should be '224', '192', '160', or '128',
but found '%s' for architecture '%s'""",
size_string, architecture)
return None
if len(parts) == 3:
is_quantized = False
else:
if parts[3] != 'quantized':
tf.logging.error(
"Couldn't understand architecture suffix '%s' for '%s'", parts[3],
architecture)
return None
is_quantized = True
data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'
data_url += version_string + '_' + size_string + '_frozen.tgz'
bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
bottleneck_tensor_size = 1001
input_width = int(size_string)
input_height = int(size_string)
input_depth = 3
resized_input_tensor_name = 'input:0'
if is_quantized:
model_base_name = 'quantized_graph.pb'
else:
model_base_name = 'frozen_graph.pb'
model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
model_file_name = os.path.join(model_dir_name, model_base_name)
input_mean = 127.5
input_std = 127.5
else:
tf.logging.error("Couldn't understand architecture name '%s'", architecture)
raise ValueError('Unknown architecture', architecture)
return {
'data_url': data_url,
'bottleneck_tensor_name': bottleneck_tensor_name,
'bottleneck_tensor_size': bottleneck_tensor_size,
'input_width': input_width,
'input_height': input_height,
'input_depth': input_depth,
'resized_input_tensor_name': resized_input_tensor_name,
'model_file_name': model_file_name,
'input_mean': input_mean,
'input_std': input_std,
}
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
input_std):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
input_width: Desired width of the image fed into the recognizer graph.
    input_height: Desired height of the image fed into the recognizer graph.
input_depth: Desired channels of the image fed into the recognizer graph.
input_mean: Pixel value that should be zero in the image for the graph.
input_std: How much to divide the pixel values by before recognition.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
offset_image = tf.subtract(resized_image, input_mean)
mul_image = tf.multiply(offset_image, 1.0 / input_std)
return jpeg_data, mul_image
def get_numerical_labels(image_lists):
  # One integer index per label, in the same order as image_lists.keys().
  return list(range(len(image_lists)))
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare necessary directories that can be used during training
prepare_file_system()
# Gather information about the model architecture we'll be using.
model_info = create_model_info(FLAGS.architecture)
if not model_info:
tf.logging.error('Did not recognize architecture flag')
return -1
# Set up the pre-trained graph.
maybe_download_and_extract(model_info['data_url'])
graph, bottleneck_tensor, resized_image_tensor = (
create_model_graph(model_info))
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
  with open('image_lists.py', 'w') as f:
    f.write('dict = ' + repr(image_lists) + '\n')
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
numerical_labels = get_numerical_labels(image_lists)
with tf.Session(graph=graph) as sess:
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
model_info['input_width'], model_info['input_height'],
model_info['input_depth'], model_info['input_mean'],
model_info['input_std'])
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, model_info['input_width'],
model_info['input_height'], model_info['input_depth'],
model_info['input_mean'], model_info['input_std'])
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.architecture)
# Add the new layer that we'll be training.
(train_step, total_loss, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(
len(image_lists.keys()), get_numerical_labels(image_lists), FLAGS.final_tensor_name, bottleneck_tensor,
model_info['bottleneck_tensor_size'])
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, total_loss_values = sess.run(
[evaluation_step, total_loss],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Total loss = %f' %
(datetime.now(), i, total_loss_values))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(sess, graph, intermediate_file_name)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
    if FLAGS.testing_percentage > 0:
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.architecture))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
tf.logging.info('%70s %s' %
(test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
save_graph_to_file(sess, graph, FLAGS.output_graph)
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
         How many training steps between saving intermediate graphs. If "0",
         intermediate graphs are not saved.\
"""
)
  parser.add_argument(
      '--center_loss',
      default=False,
      help="""\
      Use center loss along with softmax? (The center-loss term's weight is
      set by --center_loss_factor.)\
      """,
      action='store_true'
  )
parser.add_argument(
'--output_labels',
type=str,
default='tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--center_loss_alpha',
type=float,
default=0,
help="""\
      Center update rate used by the center loss: each step the class centers
      move toward the batch features by (1 - alpha) times their difference.\
"""
)
parser.add_argument(
'--center_loss_factor',
type=float,
default=0,
help="""\
      Weight applied to the center-loss term when it is added to the cross
      entropy to form the total training loss.\
"""
)
parser.add_argument(
'--architecture',
type=str,
default='inception_v3',
help="""\
Which model architecture to use. 'inception_v3' is the most accurate, but
      also the slowest. For faster or smaller models, choose a MobileNet with the
form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
less accurate, but smaller and faster network that's 920 KB on disk and
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
nhejazi/scikit-learn | sklearn/linear_model/least_angle.py | 6 | 58438 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
..). Only coefficients up to the smallest alpha value
(``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the
stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
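    Examples
    --------
    A minimal illustrative call (the data here is arbitrary):
    >>> import numpy as np
    >>> from sklearn.linear_model import lars_path
    >>> X = np.array([[0., 0.], [1., 1.], [2., 4.]])
    >>> y = np.array([0., 1., 3.])
    >>> alphas, active, coefs = lars_path(X, y, method='lasso')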
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None or Gram is False:
Gram = None
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto' or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e.'
' Reduce max_iter or increase eps parameters.'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
# than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
coefs[-add_features:] = 0
alphas = np.resize(alphas, n_iter + add_features)
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
            # handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
                # Cov_n = Cov_j + x_j * X + increment(betas)
                # TODO: will this still work with multiple drops?
                # Recompute the covariance; this could probably be done
                # better. Note that this is wrong as Xy is not swapped with
                # the rest of the variables.
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
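# Editor's note: the function below is an illustrative sketch added for
# clarity, not part of the original module. It shows a minimal call to
# lars_path on synthetic data; make_regression and the chosen problem sizes
# are assumptions made only for this demo.
def _demo_lars_path():
    from sklearn.datasets import make_regression
    # 60 samples, 8 features, only 3 of them informative
    X_demo, y_demo = make_regression(n_samples=60, n_features=8,
                                     n_informative=3, noise=1.0,
                                     random_state=0)
    alphas, active, coefs = lars_path(X_demo, y_demo, method='lasso')
    # one alpha per knot of the path, one column of coefficients per knot
    print("knots on the path:", alphas.shape[0])
    print("active set at the end of the path:", active)
    print("coefs shape (n_features, n_alphas):", coefs.shape)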
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.Lars(n_nonzero_coefs=1)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self, precompute, X, y):
if (not hasattr(precompute, '__array__')) and (
(precompute is True) or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
precompute = np.dot(X.T, X)
return precompute
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
"""Auxiliary method to fit the model using X, y as training data"""
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
Gram = self._get_gram(self.precompute, X, y)
self.alphas_ = []
self.n_iter_ = []
self.coef_ = np.empty((n_targets, n_features))
if fit_path:
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_[k] = coef_path[:, -1]
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,
Xy=Xy)
return self
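# Editor's note: illustrative sketch only (not original code) of the
# precomputed-Gram route that Lars._get_gram above enables; handing
# np.dot(X.T, X) and np.dot(X.T, y) to lars_path should reproduce the plain
# path. make_regression and the problem sizes are assumptions for the demo.
def _demo_precomputed_gram():
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=10,
                                     random_state=0)
    gram = np.dot(X_demo.T, X_demo)
    xy = np.dot(X_demo.T, y_demo)
    alphas, active, coefs = lars_path(X_demo, y_demo, Xy=xy, Gram=gram,
                                      method='lar')
    alphas_ref, _, coefs_ref = lars_path(X_demo, y_demo, method='lar')
    # both calls should follow the same path, up to numerical noise
    print("max |coef difference|:", np.max(np.abs(coefs - coefs_ref)))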
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
method = 'lasso'
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
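# Editor's note: illustrative sketch only (not original code). Because
# LassoLars optimises the same objective as the coordinate-descent Lasso
# (see the docstring above), the two estimators should agree closely on
# well-conditioned data for the same alpha. make_regression and the chosen
# alpha are assumptions made for this demo.
def _demo_lassolars_vs_lasso():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Lasso
    X_demo, y_demo = make_regression(n_samples=100, n_features=5, noise=0.5,
                                     random_state=0)
    alpha = 0.1
    lars_est = LassoLars(alpha=alpha, normalize=False).fit(X_demo, y_demo)
    cd_est = Lasso(alpha=alpha).fit(X_demo, y_demo)
    print("max |coef difference|:",
          np.max(np.abs(lars_est.coef_ - cd_est.coef_)))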
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
    ----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
    -------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
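# Editor's note: illustrative sketch only (not original code) of what
# _lars_path_residues computes for a single train/test split: the LARS path
# is fitted on the training fold and the residues of the left-out fold are
# evaluated at every knot of the path. make_regression and train_test_split
# are assumptions made for this demo.
def _demo_path_residues():
    from sklearn.datasets import make_regression
    from sklearn.model_selection import train_test_split
    X_demo, y_demo = make_regression(n_samples=80, n_features=6, noise=1.0,
                                     random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.25,
                                              random_state=0)
    alphas, active, coefs, residues = _lars_path_residues(
        X_tr, y_tr, X_te, y_te, method='lasso', verbose=False)
    # one row of residues per alpha, one column per left-out sample
    print("alphas:", alphas.shape, "residues:", residues.shape)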
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
max_iter : integer, optional
Maximum number of iterations to perform.
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super(LarsCV, self).__init__(fit_intercept=fit_intercept,
verbose=verbose, normalize=normalize,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps, copy_X=copy_X, fit_path=True,
positive=positive)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, '__array__'):
warnings.warn("Parameter 'precompute' cannot be an array in "
"%s. Automatically switch to 'auto' instead."
% self.__class__.__name__)
Gram = 'auto'
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha,
Xy=None, fit_path=True)
return self
@property
@deprecated("Attribute alpha is deprecated in 0.19 and "
"will be removed in 0.21. See ``alpha_`` instead")
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
@property
@deprecated("Attribute ``cv_mse_path_`` is deprecated in 0.18 and "
"will be removed in 0.20. Use ``mse_path_`` instead")
def cv_mse_path_(self):
return self.mse_path_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
max_iter : integer, optional
Maximum number of iterations to perform.
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.max_iter = max_iter
self.normalize = normalize
self.precompute = precompute
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
self.copy_X = copy_X
self.positive = positive
# XXX : we don't use super(LarsCV, self).__init__
# to avoid setting n_nonzero_coefs
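# Editor's note: illustrative sketch only (not original code) of the
# cross-validated estimators defined above; they choose alpha_ from the
# per-fold residues themselves. make_regression and cv=3 are assumptions
# made for this demo.
def _demo_lasso_lars_cv():
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=120, n_features=10,
                                     n_informative=4, noise=2.0,
                                     random_state=0)
    model = LassoLarsCV(cv=3).fit(X_demo, y_demo)
    print("chosen alpha_: %.4f" % model.alpha_)
    print("non-zero coefficients:", int(np.sum(model.coef_ != 0)))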
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criterion is
chosen. This value is larger by a factor of ``n_samples`` compared to
Eqns. 2.15 and 2.16 in (Zou et al, 2007).
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLarsIC(criterion='bic')
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
self.fit_path = True
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values. Will be cast to X's dtype if necessary
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self.precompute
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
sigma2 = np.var(y)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
eps64 = np.finfo('float64').eps
self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) +
K * df) # Eqns. 2.15--16 in (Zou et al, 2007)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
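# Editor's note: illustrative sketch only (not original code) showing how
# the criterion_ computed in LassoLarsIC.fit drives the choice of alpha_.
# make_regression and the problem sizes are assumptions made for this demo.
def _demo_lasso_lars_ic():
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=100, n_features=12,
                                     n_informative=3, noise=1.0,
                                     random_state=0)
    for criterion in ('aic', 'bic'):
        model = LassoLarsIC(criterion=criterion).fit(X_demo, y_demo)
        # alpha_ is the entry of alphas_ that minimises criterion_
        best = np.argmin(model.criterion_)
        print(criterion, "alpha_=%.4f" % model.alpha_,
              "criterion at optimum=%.2f" % model.criterion_[best])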
| bsd-3-clause |
quoniammm/happy-machine-learning | Udacity-ML/boston_housing-master_0/visuals.py | 6 | 5008 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import numpy as np
from sklearn.model_selection import learning_curve, validation_curve
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import ShuffleSplit, train_test_split
def ModelLearning(X, y):
""" Calculates the performance of several models with varying sizes of training data.
The learning and testing scores for each model are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(n_splits = 10, test_size = 0.2, random_state = 0)
# Generate the training set sizes increasing by 50
train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int)
# Create the figure window
fig = pl.figure(figsize=(10,7))
# Create three different models based on max_depth
for k, depth in enumerate([1,3,6,10]):
# Create a Decision tree regressor at max_depth = depth
regressor = DecisionTreeRegressor(max_depth = depth)
# Calculate the training and testing scores
sizes, train_scores, test_scores = learning_curve(regressor, X, y, \
cv = cv, train_sizes = train_sizes, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_std = np.std(train_scores, axis = 1)
train_mean = np.mean(train_scores, axis = 1)
test_std = np.std(test_scores, axis = 1)
test_mean = np.mean(test_scores, axis = 1)
# Subplot the learning curve
ax = fig.add_subplot(2, 2, k+1)
ax.plot(sizes, train_mean, 'o-', color = 'r', label = 'Training Score')
ax.plot(sizes, test_mean, 'o-', color = 'g', label = 'Testing Score')
ax.fill_between(sizes, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
ax.fill_between(sizes, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Labels
ax.set_title('max_depth = %s'%(depth))
ax.set_xlabel('Number of Training Points')
ax.set_ylabel('Score')
ax.set_xlim([0, X.shape[0]*0.8])
ax.set_ylim([-0.05, 1.05])
# Visual aesthetics
ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad = 0.)
fig.suptitle('Decision Tree Regressor Learning Performances', fontsize = 16, y = 1.03)
fig.tight_layout()
fig.show()
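# Editor's note: illustrative sketch only (not part of the original helper)
# of the learning_curve call that ModelLearning wraps, on synthetic data.
# make_regression is an assumption; everything else is imported at the top
# of this module.
def _demo_learning_curve():
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=4, noise=5.0,
                                     random_state=0)
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        DecisionTreeRegressor(max_depth=3), X_demo, y_demo,
        cv=cv, train_sizes=np.linspace(0.1, 0.8, 5), scoring='r2')
    print("training sizes:", sizes)
    print("mean validation R^2:", np.round(np.mean(test_scores, axis=1), 3))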
def ModelComplexity(X, y):
""" Calculates the performance of the model as model complexity increases.
    The learning and testing error rates are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(n_splits = 10, test_size = 0.2, random_state = 0)
# Vary the max_depth parameter from 1 to 10
max_depth = np.arange(1,11)
# Calculate the training and testing scores
train_scores, test_scores = validation_curve(DecisionTreeRegressor(), X, y, \
param_name = "max_depth", param_range = max_depth, cv = cv, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the validation curve
pl.figure(figsize=(7, 5))
pl.title('Decision Tree Regressor Complexity Performance')
pl.plot(max_depth, train_mean, 'o-', color = 'r', label = 'Training Score')
pl.plot(max_depth, test_mean, 'o-', color = 'g', label = 'Validation Score')
pl.fill_between(max_depth, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
pl.fill_between(max_depth, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Visual aesthetics
pl.legend(loc = 'lower right')
pl.xlabel('Maximum Depth')
pl.ylabel('Score')
pl.ylim([-0.05,1.05])
pl.show()
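# Editor's note: illustrative sketch only (not part of the original helper)
# of the validation_curve call that ModelComplexity wraps, again on
# synthetic data (make_regression is an assumption; the remaining names are
# imported at the top of this module).
def _demo_validation_curve():
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=4, noise=5.0,
                                     random_state=0)
    cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    depths = np.arange(1, 6)
    train_scores, test_scores = validation_curve(
        DecisionTreeRegressor(), X_demo, y_demo,
        param_name='max_depth', param_range=depths, cv=cv, scoring='r2')
    for d, score in zip(depths, np.mean(test_scores, axis=1)):
        print("max_depth=%d  mean validation R^2=%.3f" % (d, score))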
def PredictTrials(X, y, fitter, data):
""" Performs trials of fitting and predicting data. """
# Store the predicted prices
prices = []
for k in range(10):
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size = 0.2, random_state = k)
# Fit the data
reg = fitter(X_train, y_train)
# Make a prediction
pred = reg.predict([data[0]])[0]
prices.append(pred)
# Result
        print("Trial {}: ${:,.2f}".format(k+1, pred))
# Display price range
    print("\nRange in prices: ${:,.2f}".format(max(prices) - min(prices))) | mit |
ZENGXH/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where the nodes
represent the stocks and the edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels so as to minimize overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
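# Editor's note: the function below is an illustrative sketch added by the
# editor, not part of the original example. It reruns the core of the
# pipeline -- sparse inverse covariance followed by affinity propagation --
# on synthetic block-correlated data, for readers without access to the
# historical Yahoo quotes used above. The synthetic data layout is an
# assumption made only for this demo.
def _demo_pipeline_on_synthetic_data():
    rng = np.random.RandomState(0)
    # three latent factors, each driving five noisy "stocks"
    base = rng.randn(200, 3)
    X_syn = np.hstack([base[:, [k]] + 0.3 * rng.randn(200, 5)
                       for k in range(3)])
    X_syn /= X_syn.std(axis=0)
    model = covariance.GraphLassoCV()
    model.fit(X_syn)
    _, labels_syn = cluster.affinity_propagation(model.covariance_)
    print("clusters found on synthetic data:", labels_syn.max() + 1)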
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/utils/validation.py | 3 | 27241 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .. import get_config as _get_config
from ..exceptions import NonBLASDotWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if _get_config()['assume_finite']:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : array or sparse matrix
"""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
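# Editor's note: illustrative sketch only (not original code) of the
# conversions performed by as_float_array above; small integer dtypes are
# widened to float32, wider ones to float64, and float inputs pass through
# untouched when copy=False.
def _demo_as_float_array():
    X_int32 = np.arange(6, dtype=np.int32).reshape(3, 2)
    X_int64 = np.asarray(X_int32, dtype=np.int64)
    X_f32 = np.ones((3, 2), dtype=np.float32)
    print(as_float_array(X_int32).dtype)               # float32 (itemsize <= 4)
    print(as_float_array(X_int64).dtype)               # float64
    print(as_float_array(X_f32, copy=False) is X_f32)  # True, no copy made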
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError("Found input variables with inconsistent numbers of"
" samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
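# Editor's note: illustrative sketch only (not original code) of the two
# helpers above: check_consistent_length raises on mismatched first
# dimensions, and indexable passes already-sliceable inputs through.
def _demo_length_checks():
    X_demo = np.zeros((5, 3))
    y_ok = [0, 1, 0, 1, 1]
    check_consistent_length(X_demo, y_ok)           # passes silently
    try:
        check_consistent_length(X_demo, y_ok[:-1])  # 5 samples vs 4
    except ValueError as exc:
        print("raised as expected:", exc)
    X_idx, y_idx = indexable(X_demo, y_ok)
    # both inputs already support __getitem__, so they come back unchanged
    print(type(X_idx).__name__, type(y_idx).__name__)  # ndarray, list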
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : string, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : boolean
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if isinstance(accept_sparse, six.string_types):
accept_sparse = [accept_sparse]
if accept_sparse is False:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError("When providing 'accept_sparse' "
"as a tuple or list, it must contain at "
"least one string value.")
# ensure correct sparse format
if spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError("Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided "
"'accept_sparse={}'.".format(accept_sparse))
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
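# Editor's note: illustrative sketch only (not original code) of the
# conversion performed by _ensure_sparse_format above -- a COO input is
# converted to the first accepted format and cast to the requested dtype.
def _demo_ensure_sparse_format():
    X_coo = sp.coo_matrix(np.eye(3, dtype=np.int64))
    X_csr = _ensure_sparse_format(X_coo, accept_sparse=['csr', 'csc'],
                                  dtype=np.float64, copy=False,
                                  force_all_finite=True)
    print(X_csr.format, X_csr.dtype)  # 'csr', float64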
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, boolean or list/tuple of strings (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to raise a value error if X is not 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
# accept_sparse 'None' deprecation check
if accept_sparse is None:
warnings.warn(
"Passing 'None' to parameter 'accept_sparse' in methods "
"check_array and check_X_y is deprecated in version 0.19 "
"and will be removed in 0.21. Use 'accept_sparse=False' "
" instead.", DeprecationWarning)
accept_sparse = False
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
raise ValueError(
"Expected 2D array, got 1D array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array))
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning)
return array
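# Hedged usage sketch (illustrative only; the helper below is hypothetical and
# not part of this module's public API). It shows check_array coercing a plain
# nested list into a validated 2D array and rejecting 1D input when
# ensure_2d=True.
def _example_check_array_usage():
    X_checked = check_array([[1, 2], [3, 4]], dtype="numeric")
    assert X_checked.shape == (2, 2)
    try:
        check_array([1, 2, 3])  # 1D input raises ValueError when ensure_2d=True
    except ValueError:
        pass
    return X_checked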
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, boolean or list of string (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
.. deprecated:: 0.19
Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
``accept_sparse=False`` instead.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
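# Hedged usage sketch (illustrative only; hypothetical helper). This is the
# validation pattern estimators typically apply at the start of ``fit``.
def _example_check_X_y_usage():
    X = [[0., 1.], [1., 0.], [2., 2.]]
    y = [0, 1, 1]
    X_checked, y_checked = check_X_y(X, y)
    assert X_checked.shape == (3, 2)
    assert y_checked.shape == (3,)
    return X_checked, y_checked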
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
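# Hedged usage sketch (illustrative only; hypothetical helper) covering the
# three accepted seed forms described in the docstring above.
def _example_check_random_state_usage():
    rng_global = check_random_state(None)      # the np.random singleton
    rng_seeded = check_random_state(0)         # a fresh RandomState(0)
    rng_same = check_random_state(rng_seeded)  # passed through unchanged
    assert rng_same is rng_seeded
    return rng_global, rng_seeded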
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter: str
The searched parameter.
Returns
-------
is_parameter: bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
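# Hedged usage sketch (illustrative only; hypothetical helper): a slightly
# asymmetric matrix is replaced by the average of itself and its transpose.
def _example_check_symmetric_usage():
    A = np.array([[1., 2.], [2. + 1e-6, 1.]])
    A_sym = check_symmetric(A, raise_warning=False)
    assert np.allclose(A_sym, A_sym.T)
    return A_sym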
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg.:
``["coef_", "estimator_", ...], "coef_"``
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
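# Hedged usage sketch (illustrative only; the toy estimator is hypothetical):
# check_is_fitted raises NotFittedError until the named attribute exists.
def _example_check_is_fitted_usage():
    class _ToyEstimator(object):
        def fit(self, X=None, y=None):
            self.coef_ = 1.0
            return self

    est = _ToyEstimator()
    try:
        check_is_fitted(est, 'coef_')
    except NotFittedError:
        pass  # expected before fit
    check_is_fitted(est.fit(), 'coef_')  # passes once coef_ is set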
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
shusenl/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
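# A hedged aside (not part of the original example): AdaBoost also exposes
# staged predictions, which make it easy to track how the training accuracy
# evolves as boosting rounds are added.
staged_train_accuracy = [np.mean(stage_pred == y)
                         for stage_pred in bdt.staged_predict(X)]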
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
petosegan/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
xwolf12/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
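# Hedged usage sketch (illustrative only; the helper below is hypothetical and
# not part of the package API): fitting two of the estimators exposed above on
# toy Gaussian data and comparing their covariance estimates.
def _example_covariance_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal([0., 0.], [[1., 0.3], [0.3, 2.]], size=500)
    emp = EmpiricalCovariance().fit(X)
    lw = LedoitWolf().fit(X)
    return emp.covariance_, lw.covariance_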
| bsd-3-clause |
xavierwu/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
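# One possible solution sketch for the TASK blocks above (hedged: the exercise
# intends you to fill them in yourself, and other analyzers or classifiers
# would work just as well).
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)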
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/cross_decomposition/pls_.py | 5 | 28683 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
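# Hedged sketch (illustrative only; hypothetical helper): after centering and
# scaling, both blocks have zero column means and unit (ddof=1) std, which is
# what the fit routines below assume.
def _example_center_scale_xy():
    X = np.array([[0., 1.], [2., 3.], [4., 8.]])
    Y = np.array([[1.], [2.], [4.]])
    Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(X, Y, scale=True)
    assert np.allclose(Xs.mean(axis=0), 0.)
    assert np.allclose(Xs.std(axis=0, ddof=1), 1.)
    return x_mean, y_mean, x_std, y_std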
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        in two ways: (a) the inner loop of the original NIPALS algorithm, or
        (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if not self.deflation_mode in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
#2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.inv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.inv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coefs = (1. / self.x_std_.reshape((p, 1)) * self.coefs *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
            Xc = np.asarray(X)
            Xc -= self.x_mean_
            Xc /= self.x_std_
            if Y is not None:
                Yc = np.asarray(Y)
                Yc -= self.y_mean_
                Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
            Xc = np.asarray(X)
            # scaling by x_std_ is already folded into self.coefs (see fit)
            Xc -= self.x_mean_
Ypred = np.dot(Xc, self.coefs)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
        Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
        Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. This is mostly used
for modeling.
    This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vector, where n_samples is the number of samples and
p is the number of predictors. X will be centered before any analysis.
Y : array-like of response, shape = [n_samples, q]
Training vector, where n_samples is the number of samples and
q is the number of response variables. X will be centered before any
analysis.
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale X and Y.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy)
p = X.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components == C.shape[1]:
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
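# Hedged usage sketch for PLSSVD (illustrative only; it reuses the toy data
# from the PLSRegression / PLSCanonical docstring examples above).
def _example_plssvd_usage():
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    pls_svd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = pls_svd.transform(X, Y)
    return x_scores.shape, y_scores.shape   # (4, 2) and (4, 2)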
| bsd-3-clause |
mixturemodel-flow/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
madjelan/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
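# Editor's illustration (not part of the original module): a minimal sketch of
# what `_handle_zeros_in_scale` is expected to do. The helper name
# `_example_handle_zeros_in_scale` is hypothetical and exists only here.
def _example_handle_zeros_in_scale():
    # A scalar zero is replaced by 1. so that later divisions are safe.
    assert _handle_zeros_in_scale(0) == 1.
    # Zero entries of an array scale are replaced in place by 1.0.
    s = np.array([2.0, 0.0, 0.5])
    assert np.array_equal(_handle_zeros_in_scale(s), np.array([2.0, 1.0, 0.5]))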
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, pre-scaling the affected
            # feature, for instance by its mean or maximum, is an effective
            # remedy.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
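# Editor's illustration (not part of the original module): a small sketch of
# the documented behaviour of `scale` on a dense array. The helper name
# `_example_scale` is hypothetical.
def _example_scale():
    X = np.array([[1., 0.],
                  [3., 0.]])
    X_scaled = scale(X)
    # Columns are centered to zero mean; the constant second column has its
    # standard deviation reset to 1.0 by _handle_zeros_in_scale, so it stays 0.
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled, [[-1., 0.], [1., 0.]])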
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
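# Editor's illustration (not part of the original module): a sketch of the
# MinMaxScaler round trip described in the class docstring. The helper name
# `_example_min_max_scaler` is hypothetical.
def _example_min_max_scaler():
    X = np.array([[1., 10.],
                  [2., 20.],
                  [3., 30.]])
    mms = MinMaxScaler(feature_range=(0, 1)).fit(X)
    X_scaled = mms.transform(X)
    # Each column is mapped linearly onto [0, 1].
    assert np.allclose(X_scaled, [[0., 0.], [0.5, 0.5], [1., 1.]])
    # inverse_transform undoes the scaling.
    assert np.allclose(mms.inverse_transform(X_scaled), X)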
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than that of others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
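# Editor's illustration (not part of the original module): a sketch of the
# statistics StandardScaler stores and how transform uses them. The helper
# name `_example_standard_scaler` is hypothetical.
def _example_standard_scaler():
    X = np.array([[0.], [0.], [1.], [1.]])
    ss = StandardScaler().fit(X)
    assert np.allclose(ss.mean_, [0.5])
    assert np.allclose(ss.std_, [0.5])
    # transform centers by mean_ and divides by std_.
    assert np.allclose(ss.transform(X).ravel(), [-1., -1., 1., 1.])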
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
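# Editor's illustration (not part of the original module): a sketch of
# MaxAbsScaler, which divides each column by its maximum absolute value
# without shifting the data. The helper name `_example_max_abs_scaler` is
# hypothetical.
def _example_max_abs_scaler():
    X = np.array([[-2., 1.],
                  [1., -4.]])
    mas = MaxAbsScaler().fit(X)
    assert np.allclose(mas.scale_, [2., 4.])
    assert np.allclose(mas.transform(X), [[-1., 0.25], [0.5, -1.]])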
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
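# Editor's illustration (not part of the original module): a sketch of why
# RobustScaler is called robust -- the median and IQR it stores are unchanged
# by an extreme value. The helper name `_example_robust_scaler` is
# hypothetical.
def _example_robust_scaler():
    X = np.array([[1.], [3.], [5.], [7.], [9.]])
    rs = RobustScaler().fit(X)
    assert np.allclose(rs.center_, [5.])   # median
    assert np.allclose(rs.scale_, [4.])    # IQR = 7 - 3
    assert np.allclose(rs.transform(X).ravel(), [-1., -0.5, 0., 0.5, 1.])
    # Replacing the largest sample by an outlier leaves both statistics,
    # and therefore the scaling, untouched.
    X_outlier = np.array([[1.], [3.], [5.], [7.], [1000.]])
    rs_outlier = RobustScaler().fit(X_outlier)
    assert np.allclose(rs_outlier.center_, [5.])
    assert np.allclose(rs_outlier.scale_, [4.])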
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
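# Editor's illustration (not part of the original module): a sketch of the
# `powers_` attribute documented above, complementing the fit_transform
# examples in the class docstring. The helper name
# `_example_polynomial_powers` is hypothetical.
def _example_polynomial_powers():
    poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
    # Row j of powers_ gives the exponent of each input in output column j:
    # [1, x0, x1, x0^2, x0*x1, x1^2].
    assert np.array_equal(poly.powers_,
                          np.array([[0, 0], [1, 0], [0, 1],
                                    [2, 0], [1, 1], [0, 2]]))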
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
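# Editor's illustration (not part of the original module): a sketch of the
# row-wise 'l2' and 'l1' norms computed by `normalize`. The helper name
# `_example_normalize` is hypothetical.
def _example_normalize():
    X = np.array([[3., 4.],
                  [1., 0.]])
    # 'l2': each row is rescaled to unit Euclidean length.
    assert np.allclose(normalize(X, norm='l2'), [[0.6, 0.8], [1., 0.]])
    # 'l1': the absolute values of each row sum to one.
    assert np.allclose(normalize(X, norm='l1'), [[3. / 7, 4. / 7], [1., 0.]])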
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
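# Editor's illustration (not part of the original module): a sketch of the
# thresholding rule used by `binarize` (strictly greater than the threshold
# maps to 1). The helper name `_example_binarize` is hypothetical.
def _example_binarize():
    X = np.array([[0.5, -1.], [2., 0.]])
    assert np.array_equal(binarize(X, threshold=0.0),
                          np.array([[1., 0.], [1., 0.]]))
    assert np.array_equal(binarize(X, threshold=1.0),
                          np.array([[0., 0.], [1., 0.]]))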
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers the data
    (i.e., normalizes it to have zero mean) without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
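# Editor's illustration (not part of the original module): for a linear kernel
# K = X X^T (phi(x) = x), centering K with KernelCenterer matches computing
# the kernel of the explicitly centered data, as claimed in the class
# docstring. The helper name `_example_kernel_centerer` is hypothetical.
def _example_kernel_centerer():
    X = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))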
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
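# Editor's illustration (not part of the original module): a sketch of how
# `_transform_selected` applies a transform to chosen columns and stacks the
# remaining columns on the right. The helper name
# `_example_transform_selected` is hypothetical.
def _example_transform_selected():
    X = np.array([[1., 10.],
                  [2., 20.]])
    X_new = _transform_selected(X, lambda sub: 2 * sub, selected=[0])
    # Column 0 is doubled; the untouched column 1 is stacked after it.
    assert np.allclose(X_new, [[2., 10.], [4., 20.]])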
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during
        # fit, i.e. values less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
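# Editor's illustration (not part of the original module): a sketch of the
# `handle_unknown='ignore'` behaviour documented above, complementing the
# docstring example. The helper name `_example_one_hot_encoder_ignore` is
# hypothetical.
def _example_one_hot_encoder_ignore():
    enc = OneHotEncoder(n_values=3, sparse=False, handle_unknown='ignore')
    enc.fit(np.array([[0], [1], [2]]))
    # A value seen during fit activates exactly one column ...
    assert np.array_equal(enc.transform(np.array([[1]])),
                          np.array([[0., 1., 0.]]))
    # ... while an out-of-range value is silently encoded as all zeros.
    assert np.array_equal(enc.transform(np.array([[5]])),
                          np.array([[0., 0., 0.]]))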
| bsd-3-clause |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 5 | 55283 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
      an iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
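# Editor's illustration (not part of the original module): a sketch of how
# `_model_fn_args` reports argument names for a plain function versus a
# `functools.partial` with keyword-bound arguments. `_example_model_fn` and
# `_example_model_fn_args_usage` are hypothetical names used only here.
def _example_model_fn_args_usage():
  import functools

  def _example_model_fn(features, labels, mode, params):
    del features, labels, mode, params

  # A plain function reports all of its argument names.
  assert _model_fn_args(_example_model_fn) == (
      'features', 'labels', 'mode', 'params')
  # Keyword arguments bound via functools.partial are filtered out.
  bound_fn = functools.partial(_example_model_fn, params={'lr': 0.1})
  assert _model_fn_args(bound_fn) == ('features', 'labels', 'mode')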
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been '
                       'reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
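  # Illustrative sketch (not part of the original module): training through
  # fit() with an input_fn rather than the deprecated x/y arguments. The
  # input_fn and step count are assumptions for the example only.
  #
  #   def my_input_fn():
  #     features = {'x': constant_op.constant([[1.], [2.], [3.]])}
  #     labels = constant_op.constant([[0.], [1.], [1.]])
  #     return features, labels
  #
  #   estimator.fit(input_fn=my_input_fn, max_steps=100)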
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
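  # Illustrative sketch (not part of the original module): out-of-core
  # training by repeatedly calling partial_fit on chunks of data.
  # `iter_chunks` is an assumed helper that yields (x, y) numpy arrays.
  #
  #   for x_chunk, y_chunk in iter_chunks():
  #     estimator.partial_fit(x=x_chunk, y=y_chunk, steps=1)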
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
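  # Illustrative sketch (not part of the original module): evaluating with an
  # input_fn and an extra MetricSpec metric. The input_fn, metric function and
  # prediction key are assumptions for the example only.
  #
  #   results = estimator.evaluate(
  #       input_fn=my_eval_input_fn,
  #       steps=10,
  #       metrics={'accuracy': metric_spec.MetricSpec(
  #           metric_fn=metrics_lib.streaming_accuracy,
  #           prediction_key='classes')})
  #   print(results['accuracy'], results['global_step'])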
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The input samples on which to make
        predictions. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
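  # Illustrative sketch (not part of the original module): consuming the
  # generator returned by predict(). The input_fn is assumed to run for a
  # single epoch so that the iterable terminates.
  #
  #   for prediction in estimator.predict(input_fn=my_predict_input_fn,
  #                                       as_iterable=True):
  #     handle(prediction)  # `handle` is a placeholder for downstream code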
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
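  # Illustrative sketch (not part of the original module): the shapes of
  # eval_dict handled above. Streaming metrics yield (value_op, update_op)
  # pairs, which are split into value ops plus one grouped update op; bare
  # tensors pass through as value ops. The names here are assumptions.
  #
  #   eval_dict = {
  #       'accuracy': (accuracy_value_op, accuracy_update_op),  # tuple form
  #       'mean_loss': mean_loss_tensor,                        # bare tensor
  #   }
  #   update_op, value_ops = self._extract_metric_update_ops(eval_dict)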
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
            configuring Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not include params '
                         'argument, but params (%s) were passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
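  # Illustrative sketch (not part of the original module): a model_fn whose
  # signature is introspected by _call_model_fn. The network, loss and
  # optimizer helpers are placeholders, not real APIs.
  #
  #   def my_model_fn(features, labels, mode, params):
  #     predictions = build_network(features, params)    # placeholder
  #     loss = compute_loss(predictions, labels)         # placeholder
  #     train_op = make_train_op(loss, params)           # placeholder
  #     return model_fn_lib.ModelFnOps(
  #         mode=mode, predictions=predictions, loss=loss, train_op=train_op)
  #
  #   estimator = Estimator(model_fn=my_model_fn, params={'learning_rate': .1})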
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
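  # Illustrative sketch (not part of the original module): exporting a
  # SavedModel with a parsing serving_input_fn. The feature spec and export
  # directory are assumptions for the example only.
  #
  #   feature_spec = {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)}
  #   serving_input_fn = (
  #       input_fn_utils.build_parsing_serving_input_fn(feature_spec))
  #   export_dir = estimator.export_savedmodel('/tmp/export', serving_input_fn)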
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
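  # Illustrative sketch (not part of the original module): the scikit-learn
  # style entry points offered by SKCompat. The estimator, arrays and sizes
  # are assumptions for the example only.
  #
  #   sk_est = SKCompat(Estimator(model_fn=my_model_fn))
  #   sk_est.fit(x_train, y_train, batch_size=128, steps=1000)
  #   scores = sk_est.score(x_test, y_test, batch_size=128)
  #   preds = sk_est.predict(x_test, batch_size=128)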
| apache-2.0 |
josenavas/labman | labman/db/process.py | 1 | 115578 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from datetime import date, datetime
from io import StringIO
from itertools import chain
import re
from json import dumps
import numpy as np
import pandas as pd
from . import base
from . import sql_connection
from . import user as user_module
from . import plate as plate_module
from . import container as container_module
from . import composition as composition_module
from . import equipment as equipment_module
from .study import Study
class Process(base.LabmanObject):
"""Base process object
Attributes
----------
id
date
personnel
"""
@staticmethod
def factory(process_id):
"""Initializes the correct Process subclass
Parameters
----------
process_id : int
The process id
Returns
-------
An instance of a subclass of Process
"""
factory_classes = {
# 'primer template creation': TODO,
'primer working plate creation': PrimerWorkingPlateCreationProcess,
'sample plating': SamplePlatingProcess,
'reagent creation': ReagentCreationProcess,
'gDNA extraction': GDNAExtractionProcess,
'16S library prep': LibraryPrep16SProcess,
'shotgun library prep': LibraryPrepShotgunProcess,
'quantification': QuantificationProcess,
'gDNA normalization': NormalizationProcess,
'compress gDNA plates': GDNAPlateCompressionProcess,
'pooling': PoolingProcess,
'sequencing': SequencingProcess}
with sql_connection.TRN as TRN:
sql = """SELECT description
FROM qiita.process_type
JOIN qiita.process USING (process_type_id)
WHERE process_id = %s"""
TRN.add(sql, [process_id])
p_type = TRN.execute_fetchlast()
constructor = factory_classes[p_type]
if constructor._table == 'qiita.process':
instance = constructor(process_id)
else:
sql = """SELECT {}
FROM {}
WHERE process_id = %s""".format(
constructor._id_column, constructor._table)
TRN.add(sql, [process_id])
subclass_id = TRN.execute_fetchlast()
instance = constructor(subclass_id)
return instance
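    # Illustrative sketch (not part of the original module): factory() hides
    # the per-type subtables, so callers only need a process id. The id below
    # is an assumption for the example.
    #
    #   process = Process.factory(42)
    #   # `process` is an instance of the matching subclass, e.g.
    #   # GDNAExtractionProcess or SamplePlatingProcess.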
@classmethod
def _common_creation_steps(cls, user, process_date=None):
if process_date is None:
process_date = date.today()
with sql_connection.TRN as TRN:
sql = """SELECT process_type_id
FROM qiita.process_type
WHERE description = %s"""
TRN.add(sql, [cls._process_type])
pt_id = TRN.execute_fetchlast()
sql = """INSERT INTO qiita.process
(process_type_id, run_date, run_personnel_id)
VALUES (%s, %s, %s)
RETURNING process_id"""
TRN.add(sql, [pt_id, process_date, user.id])
p_id = TRN.execute_fetchlast()
return p_id
def _get_process_attr(self, attr):
"""Returns the value of the given process attribute
Parameters
----------
attr : str
The attribute to retrieve
Returns
-------
Object
The attribute
"""
with sql_connection.TRN as TRN:
sql = """SELECT {}
FROM qiita.process
JOIN {} USING (process_id)
WHERE {} = %s""".format(attr, self._table,
self._id_column)
TRN.add(sql, [self.id])
return TRN.execute_fetchlast()
@property
def date(self):
return self._get_process_attr('run_date')
@property
def personnel(self):
return user_module.User(self._get_process_attr('run_personnel_id'))
@property
def process_id(self):
return self._get_process_attr('process_id')
@property
def plates(self):
"""The plates being extracted by this process
Returns
-------
plate : list of labman.db.Plate
The extracted plates
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.container
LEFT JOIN qiita.well USING (container_id)
WHERE latest_upstream_process_id = %s
ORDER BY plate_id"""
TRN.add(sql, [self.process_id])
plate_ids = TRN.execute_fetchflatten()
return [plate_module.Plate(plate_id) for plate_id in plate_ids]
class _Process(Process):
"""Process object
Not all processes have a specific subtable, so we need to override the
date and personnel attributes
Attributes
----------
id
date
personnel
"""
_table = 'qiita.process'
_id_column = 'process_id'
@property
def date(self):
return self._get_attr('run_date')
@property
def personnel(self):
return user_module.User(self._get_attr('run_personnel_id'))
@property
def process_id(self):
return self._get_attr('process_id')
class SamplePlatingProcess(_Process):
"""Sample plating process"""
_process_type = 'sample plating'
@classmethod
def create(cls, user, plate_config, plate_ext_id, volume=None):
"""Creates a new sample plating process
Parameters
----------
user : labman.db.user.User
User performing the plating
plate_config : labman.db.PlateConfiguration
The sample plate configuration
plate_ext_id : str
The external plate id
volume : float, optional
Starting well volume
Returns
-------
SamplePlatingProcess
"""
with sql_connection.TRN:
volume = volume if volume else 0
# Add the row to the process table
instance = cls(cls._common_creation_steps(user))
# Create the plate
plate = plate_module.Plate.create(plate_ext_id, plate_config)
# By definition, all well plates are blank at the beginning
# so populate all the wells in the plate with BLANKS
for i in range(plate_config.num_rows):
for j in range(plate_config.num_columns):
well = container_module.Well.create(
plate, instance, volume, i + 1, j + 1)
composition_module.SampleComposition.create(
instance, well, volume)
return instance
@property
def plate(self):
"""The plate being plated by this process
Returns
-------
plate : labman.db.Plate
The plate being plated
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.container
LEFT JOIN qiita.well USING (container_id)
LEFT JOIN qiita.plate USING (plate_id)
WHERE latest_upstream_process_id = %s"""
TRN.add(sql, [self.id])
plate_id = TRN.execute_fetchlast()
return plate_module.Plate(plate_id)
def update_well(self, row, col, content):
"""Updates the content of a well
Parameters
----------
row: int
The well row
col: int
The well column
content: str
The new contents of the well
Returns
-------
str
The new contents of the well
"""
return self.plate.get_well(row, col).composition.update(content)
def comment_well(self, row, col, comment):
"""Updates the comment of a well
Parameters
----------
row: int
The well row
col: int
The well column
        comment: str
            The new comment for the well
"""
self.plate.get_well(row, col).composition.notes = comment
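    # Illustrative sketch (not part of the original module): plating a sample
    # and annotating its well. The user, plate configuration id and sample
    # name are assumptions for the example only.
    #
    #   process = SamplePlatingProcess.create(
    #       user, plate_module.PlateConfiguration(1), 'Test plate 1')
    #   process.update_well(1, 1, '1.SKB1.640202')
    #   process.comment_well(1, 1, 'Low volume sample')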
class ReagentCreationProcess(_Process):
"""Reagent creation process"""
_process_type = 'reagent creation'
@classmethod
def create(cls, user, external_id, volume, reagent_type):
"""Creates a new reagent creation process
Parameters
----------
user : labman.db.user.User
User adding the reagent to the system
external_id: str
The external id of the reagent
volume: float
Initial reagent volume
reagent_type : str
The type of the reagent
Returns
-------
        ReagentCreationProcess
"""
with sql_connection.TRN:
# Add the row to the process table
instance = cls(cls._common_creation_steps(user))
# Create the tube and the composition
tube = container_module.Tube.create(instance, external_id, volume)
composition_module.ReagentComposition.create(
instance, tube, volume, reagent_type, external_id)
return instance
@property
def tube(self):
"""The tube storing the reagent"""
with sql_connection.TRN as TRN:
sql = """SELECT tube_id
FROM qiita.tube
LEFT JOIN qiita.container USING (container_id)
WHERE latest_upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
tube_id = TRN.execute_fetchlast()
return container_module.Tube(tube_id)
class PrimerWorkingPlateCreationProcess(Process):
"""Primer working plate creation process object
Attributes
----------
primer_set
master_set_order_number
"""
_table = 'qiita.primer_working_plate_creation_process'
_id_column = 'primer_working_plate_creation_process_id'
_process_type = 'primer working plate creation'
@classmethod
def create(cls, user, primer_set, master_set_order, creation_date=None):
"""Creates a new set of working primer plates
Parameters
----------
user : labman.db.user.User
User creating the new set of primer plates
primer_set : labman.composition.PrimerSet
The primer set
master_set_order : str
The master set order
creation_date: datetime.date, optional
The creation date. Default: today
Returns
-------
PrimerWorkingPlateCreationProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(
user, process_date=creation_date)
sql = """INSERT INTO qiita.primer_working_plate_creation_process
(process_id, primer_set_id, master_set_order_number)
VALUES (%s, %s, %s)
RETURNING primer_working_plate_creation_process_id"""
TRN.add(sql, [process_id, primer_set.id, master_set_order])
instance = cls(TRN.execute_fetchlast())
creation_date = instance.date
plate_name_suffix = creation_date.strftime('%Y-%m-%d')
primer_set_plates = primer_set.plates
check_name = '%s %s' % (primer_set_plates[0].external_id,
plate_name_suffix)
if plate_module.Plate.external_id_exists(check_name):
# The likelihood of this happening in the real system is really
# low, but better be safe than sorry
plate_name_suffix = datetime.now().strftime('%Y-%m-%d %H:%M')
for ps_plate in primer_set_plates:
# Create a new working primer plate
plate_name = '%s %s' % (ps_plate.external_id,
plate_name_suffix)
plate_config = ps_plate.plate_configuration
work_plate = plate_module.Plate.create(
plate_name, plate_config)
# Add the wells to the new plate
for row in ps_plate.layout:
for ps_well in row:
w_well = container_module.Well.create(
work_plate, instance, 10, ps_well.row,
ps_well.column)
composition_module.PrimerComposition.create(
instance, w_well, 10, ps_well.composition)
return instance
@property
def primer_set(self):
"""The primer set template from which the working plates are created
Returns
-------
PrimerSet
"""
return composition_module.PrimerSet(self._get_attr('primer_set_id'))
@property
def master_set_order(self):
"""The master set order
Returns
-------
str
"""
return self._get_attr('master_set_order_number')
class GDNAExtractionProcess(Process):
"""gDNA extraction process object
Attributes
----------
kingfisher
epmotion
epmotion_tool
extraction_kit
sample_plate
volume
See Also
--------
Process
"""
_table = 'qiita.gdna_extraction_process'
_id_column = 'gdna_extraction_process_id'
_process_type = 'gDNA extraction'
@property
def kingfisher(self):
"""The King Fisher robot used during extraction
Returns
-------
Equipment
"""
return equipment_module.Equipment(
self._get_attr('kingfisher_robot_id'))
@property
def epmotion(self):
"""The EpMotion robot used during extraction
Returns
-------
Equipment
"""
return equipment_module.Equipment(self._get_attr('epmotion_robot_id'))
@property
def epmotion_tool(self):
"""The EpMotion tool used during extraction
Returns
-------
Equipment
"""
return equipment_module.Equipment(self._get_attr('epmotion_tool_id'))
@property
def extraction_kit(self):
"""The extraction kit used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('extraction_kit_id'))
@property
def sample_plate(self):
"""The source sample plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition gc
JOIN qiita.gdna_composition gdc
ON gc.composition_id = gdc.composition_id
JOIN qiita.sample_composition ssc
USING (sample_composition_id)
JOIN qiita.composition sc
ON ssc.composition_id = sc.composition_id
JOIN qiita.well w
ON sc.container_id = w.container_id
WHERE gc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def volume(self):
"""The elution volume
Returns
-------
float
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT total_volume
FROM qiita.composition
WHERE upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return TRN.execute_fetchlast()
@classmethod
def create(cls, user, plate, kingfisher, epmotion, epmotion_tool,
extraction_kit, volume, gdna_plate_name, extraction_date=None):
"""Creates a new gDNA extraction process
Parameters
----------
user : labman.db.user.User
User performing the gDNA extraction
plate: labman.db.plate.Plate
The plate being extracted
kingfisher: labman.db.equipment.Equipment
The KingFisher used
epmotion: labman.db.equipment.Equipment
The EpMotion used
epmotion_tool: labman.db.equipment.Equipment
The EpMotion tool used
        extraction_kit: labman.db.composition.ReagentComposition
The extraction kit used
volume : float
            The elution volume extracted
gdna_plate_name : str
The name for the gdna plate
extraction_date : datetime.date, optional
The extraction date. Default: today
Returns
-------
GDNAExtractionProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(
user, process_date=extraction_date)
# Add the row to the gdna_extraction_process table
sql = """INSERT INTO qiita.gdna_extraction_process
(process_id, epmotion_robot_id, epmotion_tool_id,
kingfisher_robot_id, extraction_kit_id)
VALUES (%s, %s, %s, %s, %s)
RETURNING gdna_extraction_process_id"""
TRN.add(sql, [process_id, epmotion.id, epmotion_tool.id,
kingfisher.id, extraction_kit.id])
instance = cls(TRN.execute_fetchlast())
# Create the extracted plate
plate_config = plate.plate_configuration
gdna_plate = plate_module.Plate.create(
gdna_plate_name, plate_config)
plate_layout = plate.layout
# Add the wells to the new plate
for i in range(plate_config.num_rows):
for j in range(plate_config.num_columns):
plated_sample = plate_layout[i][j].composition
if plated_sample.sample_composition_type != 'empty':
well = container_module.Well.create(
gdna_plate, instance, volume, i + 1, j + 1)
composition_module.GDNAComposition.create(
instance, well, volume, plated_sample)
return instance
class GDNAPlateCompressionProcess(Process):
"""Gets 1 to 4 96-well gDNA plates and remaps them in a 384-well plate
    The remapping schema follows this structure:
A B A B A B A B ...
C D C D C D C D ...
A B A B A B A B ...
C D C D C D C D ...
...
"""
_table = 'qiita.compression_process'
_id_column = 'compression_process_id'
_process_type = "compress gDNA plates"
def _compress_plate(self, out_plate, in_plate, row_pad, col_pad, volume=1):
"""Compresses the 96-well in_plate into the 384-well out_plate"""
with sql_connection.TRN:
layout = in_plate.layout
for row in layout:
for well in row:
if well is not None:
# The row/col pair is stored in the DB starting at 1
# subtract 1 to make it start at 0 so the math works
# and re-add 1 at the end
out_well_row = (((well.row - 1) * 2) + row_pad) + 1
out_well_col = (((well.column - 1) * 2) + col_pad) + 1
out_well = container_module.Well.create(
out_plate, self, volume, out_well_row,
out_well_col)
composition_module.CompressedGDNAComposition.create(
self, out_well, volume, well.composition)
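    # Illustrative sketch (not part of the original module): the interleaving
    # applied above. Input plate i (0-3) receives row_pad = i // 2 and
    # col_pad = i % 2, so 96-well position (r, c) (1-based) maps to 384-well
    # position ((r - 1) * 2 + row_pad + 1, (c - 1) * 2 + col_pad + 1).
    #
    #   for i in range(4):
    #       row_pad, col_pad = i // 2, i % 2
    #       print((0 * 2 + row_pad) + 1, (0 * 2 + col_pad) + 1)
    #   # Well (1, 1) of plates 0-3 lands on (1, 1), (1, 2), (2, 1), (2, 2),
    #   # i.e. the A B / C D pattern described in the class docstring.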
@classmethod
def create(cls, user, plates, plate_ext_id, robot):
"""Creates a new gDNA compression process
Parameters
----------
user : labman.db.user.User
User performing the plating
plates: list of labman.db.plate.Plate
The plates to compress
plate_ext_id : str
The external plate id
robot: Equipment
The robot performing the compression
Raises
------
ValueError
Returns
-------
GDNAPlateCompressionProcess
"""
if not (1 <= len(plates) <= 4):
raise ValueError(
'Cannot compress %s gDNA plates. Please provide 1 to 4 '
'gDNA plates' % len(plates))
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the compression_process table
sql = """INSERT INTO qiita.compression_process
(process_id, robot_id)
VALUES (%s, %s)
RETURNING compression_process_id"""
TRN.add(sql, [process_id, robot.id])
instance = cls(TRN.execute_fetchlast())
# Create the output plate
# Magic number 3 -> 384-well plate
plate = plate_module.Plate.create(
plate_ext_id, plate_module.PlateConfiguration(3))
# Compress the plates
for i, in_plate in enumerate(plates):
row_pad = int(np.floor(i / 2))
col_pad = i % 2
instance._compress_plate(plate, in_plate, row_pad, col_pad)
return instance
@property
def robot(self):
"""The robot performing the compression"""
return equipment_module.Equipment(self._get_attr('robot_id'))
@property
def gdna_plates(self):
"""The input gdna plates"""
with sql_connection.TRN as TRN:
            # Rationale: given the compression algorithm, we only need to look
# at the 4 wells on the top left corner (1, 1), (1, 2), (2, 1) and
# (2, 2), and in that order, to know which plates have been
# compressed
sql = """SELECT gw.plate_id
FROM qiita.composition cc
JOIN qiita.well cw ON cc.container_id = cw.container_id
JOIN qiita.compressed_gdna_composition cgc
ON cc.composition_id = cgc.composition_id
JOIN qiita.gdna_composition gdnac ON
cgc.gdna_composition_id = gdnac.gdna_composition_id
JOIN qiita.composition gc
ON gdnac.composition_id = gc.composition_id
JOIN qiita.well gw ON gc.container_id = gw.container_id
WHERE cc.upstream_process_id = %s AND
cw.row_num IN (1, 2) AND cw.col_num IN (1, 2)
ORDER BY cw.row_num, cw.col_num"""
TRN.add(sql, [self.process_id])
return [plate_module.Plate(pid)
for pid in TRN.execute_fetchflatten()]
class LibraryPrep16SProcess(Process):
"""16S Library Prep process object
Attributes
----------
mastermix_lots
water_lots
epmotions
See Also
--------
Process
"""
_table = 'qiita.library_prep_16s_process'
_id_column = 'library_prep_16s_process_id'
_process_type = '16S library prep'
@classmethod
def create(cls, user, plate, primer_plate, lib_plate_name, epmotion,
epmotion_tool_tm300, epmotion_tool_tm50, master_mix, water_lot,
volume, preparation_date=None):
"""Creates a new 16S library prep process
Parameters
----------
user : labman.db.user.User
User performing the library prep
plate: labman.db.plate.Plate
The plate being prepared for amplicon sequencing
primer_plate: labman.db.plate.Plate
The primer plate
lib_plate_name: str
The name of the prepared plate
epmotion: labman.db.equipment.Equipment
The EpMotion
epmotion_tool_tm300: labman.db.equipment.Equipment
The EpMotion TM300 8 tool
epmotion_tool_tm50: labman.db.equipment.Equipment
            The EpMotion TM50 8 tool
master_mix: labman.db.composition.ReagentComposition
The mastermix used
water_lot: labman.db.composition.ReagentComposition
The water lot used
volume : float
The PCR total volume in the wells
preparation_date : datetime.date, optional
The preparation date. Default: today
Returns
-------
LibraryPrep16SProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(
user, process_date=preparation_date)
# Add the row to the library_prep_16s_process
sql = """INSERT INTO qiita.library_prep_16s_process
(process_id, epmotion_robot_id,
epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id,
master_mix_id, water_lot_id)
VALUES (%s, %s, %s, %s, %s, %s)
RETURNING library_prep_16s_process_id"""
TRN.add(sql, [process_id, epmotion.id, epmotion_tool_tm300.id,
epmotion_tool_tm50.id, master_mix.id, water_lot.id])
instance = cls(TRN.execute_fetchlast())
# Create the library plate
plate_config = plate.plate_configuration
library_plate = plate_module.Plate.create(lib_plate_name,
plate_config)
gdna_layout = plate.layout
primer_layout = primer_plate.layout
for i in range(plate_config.num_rows):
for j in range(plate_config.num_columns):
if gdna_layout[i][j] is not None:
well = container_module.Well.create(
library_plate, instance, volume, i + 1, j + 1)
composition_module.LibraryPrep16SComposition.create(
instance, well, volume,
gdna_layout[i][j].composition,
primer_layout[i][j].composition)
return instance
@property
def mastermix(self):
"""The master mix lot used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('master_mix_id'))
@property
def water_lot(self):
"""The water lot used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('water_lot_id'))
@property
def epmotion(self):
"""The EpMotion robot used
Returns
-------
Equipment
"""
return equipment_module.Equipment(self._get_attr('epmotion_robot_id'))
@property
def epmotion_tm300_tool(self):
"""The EpMotion tm300 tool used
Returns
-------
Equipment
"""
return equipment_module.Equipment(
self._get_attr('epmotion_tm300_8_tool_id'))
@property
def epmotion_tm50_tool(self):
"""The EpMotion tm50 tool used
Returns
-------
Equipment
"""
return equipment_module.Equipment(
self._get_attr('epmotion_tm50_8_tool_id'))
@property
def gdna_plate(self):
"""The input gdna plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition lc
JOIN qiita.library_prep_16s_composition l16sc
ON lc.composition_id = l16sc.composition_id
JOIN qiita.gdna_composition gdc
USING (gdna_composition_id)
JOIN qiita.composition gc
ON gc.composition_id = gdc.composition_id
JOIN qiita.well w ON gc.container_id = w.container_id
WHERE lc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def primer_plate(self):
"""The primer plate
Returns
-------
plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition lc
JOIN qiita.library_prep_16s_composition l16sc
ON lc.composition_id = l16sc.composition_id
JOIN qiita.primer_composition prc
USING (primer_composition_id)
JOIN qiita.composition pc
ON pc.composition_id = prc.composition_id
JOIN qiita.well w ON pc.container_id = w.container_id
WHERE lc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def volume(self):
"""The PCR Total volume
Returns
-------
float
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT total_volume
FROM qiita.composition
WHERE upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return TRN.execute_fetchlast()
class NormalizationProcess(Process):
"""Normalization process object
Attributes
----------
quantification_process
water_lot
See Also
--------
Process
"""
_table = 'qiita.normalization_process'
_id_column = 'normalization_process_id'
_process_type = 'gDNA normalization'
@staticmethod
def _calculate_norm_vol(dna_concs, ng=5, min_vol=2.5, max_vol=3500,
resolution=2.5):
"""Calculates nanoliters of each sample to add to get a normalized pool
Parameters
----------
dna_concs : numpy array of float
The concentrations calculated via PicoGreen (ng/uL)
ng : float, optional
The amount of DNA to pool (ng). Default: 5
min_vol : float, optional
The minimum volume to pool (nL). Default: 2.5
max_vol : float, optional
The maximum volume to pool (nL). Default: 3500
resolution: float, optional
Resolution to use (nL). Default: 2.5
Returns
-------
sample_vols : numpy array of float
The volumes to pool (nL)
"""
sample_vols = ng / np.nan_to_num(dna_concs) * 1000
sample_vols = np.clip(sample_vols, min_vol, max_vol)
sample_vols = np.round(sample_vols / resolution) * resolution
return sample_vols
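    # Worked example (hypothetical concentrations, default parameters): a
    # 20 ng/uL sample needs 5 / 20 * 1000 = 250 nL, a 2 ng/uL sample needs
    # 2500 nL, and a 0.5 ng/uL sample would need 10000 nL but is clipped to
    # max_vol:
    #
    #     NormalizationProcess._calculate_norm_vol(np.array([20., 2., 0.5]))
    #     # -> array([250., 2500., 3500.])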
@classmethod
def create(cls, user, quant_process, water, plate_name, total_vol=3500,
ng=5, min_vol=2.5, max_vol=3500, resolution=2.5,
reformat=False):
"""Creates a new normalization process
Parameters
----------
user : labman.db.user.User
User performing the gDNA extraction
quant_process : QuantificationProcess
The quantification process to use for normalization
water: ReagentComposition
The water lot used for the normalization
plate_name: str
The output plate name
total_vol: float, optional
The total volume of normalized DNA (nL). Default: 3500
ng : float, optional
The amount of DNA to pool (ng). Default: 5
min_vol : float, optional
The minimum volume to pool (nL). Default: 2.5
max_vol : float, optional
The maximum volume to pool (nL). Default: 3500
        resolution: float, optional
            Resolution to use (nL). Default: 2.5
        reformat: bool, optional
            If True, reformat the plate from the interleaved format to the
            column format. Useful when the 384-well plate is not full, to
            save reagents. Default: False
Returns
-------
NormalizationProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the normalization_process tables
func_data = {
'function': 'default',
'parameters': {'total_volume': total_vol, 'target_dna': ng,
'min_vol': min_vol, 'max_volume': max_vol,
'resolution': resolution, 'reformat': reformat}}
sql = """INSERT INTO qiita.normalization_process
(process_id, quantitation_process_id, water_lot_id,
normalization_function_data)
VALUES (%s, %s, %s, %s)
RETURNING normalization_process_id"""
TRN.add(sql, [process_id, quant_process.id, water.id,
dumps(func_data)])
instance = cls(TRN.execute_fetchlast())
# Retrieve all the concentration values
concs = quant_process.concentrations
# Transform the concentrations to a numpy array
np_conc = np.asarray([raw_con for _, raw_con, _ in concs])
dna_v = NormalizationProcess._calculate_norm_vol(
np_conc, ng, min_vol, max_vol, resolution)
water_v = total_vol - dna_v
# Create the plate. 3 -> 384-well plate
plate_config = plate_module.PlateConfiguration(3)
plate = plate_module.Plate.create(plate_name, plate_config)
for (comp, _, _), dna_vol, water_vol in zip(concs, dna_v, water_v):
comp_well = comp.container
row = comp_well.row
column = comp_well.column
if reformat:
row = row - 1
column = column - 1
roffset = row % 2
row = int(row - roffset + np.floor(column / 12)) + 1
coffset = column % 2 + (row % 2) * 2
column = int(coffset * 6 + (column / 2) % 6) + 1
well = container_module.Well.create(
plate, instance, total_vol, row, column)
composition_module.NormalizedGDNAComposition.create(
instance, well, total_vol, comp, dna_vol, water_vol)
return instance
@property
def quantification_process(self):
"""The quantification process used
Returns
-------
QuantificationProcess
"""
return QuantificationProcess(self._get_attr('quantitation_process_id'))
@property
def water_lot(self):
"""The water lot used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('water_lot_id'))
@property
def compressed_plate(self):
"""The input compressed plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition nc
JOIN qiita.normalized_gdna_composition ngc
ON nc.composition_id = ngc.composition_id
JOIN qiita.compressed_gdna_composition cgdnac
USING (compressed_gdna_composition_id)
JOIN qiita.composition cc
ON cc.composition_id = cgdnac.composition_id
JOIN qiita.well w ON cc.container_id = w.container_id
WHERE nc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def normalization_function_data(self):
"""The information about the normalization function
Returns
-------
str
"""
return self._get_attr('normalization_function_data')
@staticmethod
def _format_picklist(dna_vols, water_vols, wells, dest_wells=None,
dna_concs=None, sample_names=None,
dna_plate_name='Sample', water_plate_name='Water',
dna_plate_type='384PP_AQ_BP2_HT',
water_plate_type='384PP_AQ_BP2_HT',
dest_plate_name='NormalizedDNA',
dna_plate_names=None):
"""Formats Echo pick list to achieve a normalized input DNA pool
Parameters
----------
dna_vols: numpy array of float
The volumes of dna to add
water_vols: numpy array of float
The volumes of water to add
wells: numpy array of str
The well codes in the same orientation as the DNA concentrations
dest_wells: numpy array of str
The well codes, in the same orientation as `wells`,
in which to place each sample if reformatting
dna_concs: numpy array of float
The concentrations calculated via PicoGreen (ng/uL)
sample_names: numpy array of str
The sample names in the same orientation as the DNA concentrations
Returns
-------
picklist : str
The Echo formatted pick list
"""
# check that arrays are the right size
        if not (dna_vols.shape == wells.shape == water_vols.shape):
            raise ValueError(
                'dna_vols %r has a size different from wells %r or '
                'water_vols %r' % (dna_vols.shape, wells.shape,
                                   water_vols.shape))
# if destination wells not specified, use source wells
if dest_wells is None:
dest_wells = wells
if sample_names is None:
sample_names = np.empty(dna_vols.shape) * np.nan
if dna_concs is None:
dna_concs = np.empty(dna_vols.shape) * np.nan
        if not (dna_concs.shape == sample_names.shape == dna_vols.shape):
            raise ValueError(
                'dna_vols %r has a size different from dna_concs %r or '
                'sample_names %r' % (dna_vols.shape, dna_concs.shape,
                                     sample_names.shape))
# header
picklist = [
'Sample\tSource Plate Name\tSource Plate Type\tSource Well'
'\tConcentration\tTransfer Volume\tDestination Plate Name'
'\tDestination Well']
# water additions
for index, sample in np.ndenumerate(sample_names):
picklist.append('\t'.join(
[str(sample), water_plate_name, water_plate_type,
str(wells[index]), str(dna_concs[index]),
str(water_vols[index]), dest_plate_name,
str(dest_wells[index])]))
# DNA additions
for index, sample in np.ndenumerate(sample_names):
if dna_plate_names is not None:
dna_plate_name = dna_plate_names[index]
picklist.append('\t'.join(
[str(sample), dna_plate_name, dna_plate_type,
str(wells[index]), str(dna_concs[index]),
str(dna_vols[index]), dest_plate_name,
str(dest_wells[index])]))
return '\n'.join(picklist)
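    # Sketch of the resulting tab-separated picklist (hypothetical values:
    # one sample 's1' in source well A1, 3250 nL of water and 250 nL of DNA,
    # both dispensed into destination well A1). Every sample produces one
    # water row and, further down, one DNA row:
    #
    #     Sample  Source Plate Name  Source Plate Type  ...  Destination Well
    #     s1      Water              384PP_AQ_BP2_HT    ...  A1
    #     s1      Sample             384PP_AQ_BP2_HT    ...  A1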
def generate_echo_picklist(self):
"""Generates Echo pick list to achieve a normalized input DNA pool
Returns
-------
str
The echo-formatted pick list
"""
concentrations = {
comp: conc
for comp, conc, _ in self.quantification_process.concentrations}
dna_vols = []
water_vols = []
wells = []
dest_wells = []
sample_names = []
dna_concs = []
layout = self.plates[0].layout
for row in layout:
for well in row:
if well:
composition = well.composition
dna_vols.append(composition.dna_volume)
water_vols.append(composition.water_volume)
# For the source well we need to take a look at the
# gdna comp
c_gdna_comp = composition.compressed_gdna_composition
wells.append(c_gdna_comp.container.well_id)
dest_wells.append(well.well_id)
# For the sample name we need to check the sample
# composition
sample_comp = c_gdna_comp.gdna_composition.\
sample_composition
sample_names.append(sample_comp.content)
# For the DNA concentrations we need to look at
# the quantification process
dna_concs.append(concentrations[c_gdna_comp])
# _format_picklist expects numpy arrays
dna_vols = np.asarray(dna_vols)
water_vols = np.asarray(water_vols)
wells = np.asarray(wells)
dest_wells = np.asarray(dest_wells)
sample_names = np.asarray(sample_names)
dna_concs = np.asarray(dna_concs)
return NormalizationProcess._format_picklist(
dna_vols, water_vols, wells, dest_wells=dest_wells,
sample_names=sample_names, dna_concs=dna_concs)
class LibraryPrepShotgunProcess(Process):
"""Shotgun Library Prep process object
Attributes
----------
kappa_hyper_plus_kit
stub_lot
normalization_process
See Also
--------
Process
"""
_table = 'qiita.library_prep_shotgun_process'
_id_column = 'library_prep_shotgun_process_id'
_process_type = 'shotgun library prep'
@classmethod
def create(cls, user, plate, plate_name, kappa_hyper_plus_kit, stub_lot,
volume, i5_plate, i7_plate):
"""Creats a new LibraryPrepShotgunProcess
Parameters
----------
user : labman.db.user.User
User performing the library prep
plate: labman.db.plate.Plate
The normalized gDNA plate of origin
plate_name: str
            The name of the library plate
kappa_hyper_plus_kit: labman.db.composition.ReagentComposition
The Kappa Hyper Plus kit used
stub_lot: labman.db.composition.ReagentComposition
The stub lot used
volume : float
The initial volume in the wells
i5_plate: labman.db.plate.Plate
The i5 primer working plate
i7_plate: labman.db.plate.Plate
The i7 primer working plate
Returns
-------
LibraryPrepShotgunProcess
The newly created process
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the library_prep_shotgun_process
sql = """INSERT INTO qiita.library_prep_shotgun_process
(process_id, kappa_hyper_plus_kit_id, stub_lot_id,
normalization_process_id)
VALUES (%s, %s, %s, (
SELECT DISTINCT normalization_process_id
FROM qiita.normalization_process np
JOIN qiita.container c
ON np.process_id =
c.latest_upstream_process_id
JOIN qiita.well USING (container_id)
WHERE plate_id = %s))
RETURNING library_prep_shotgun_process_id"""
TRN.add(sql, [process_id, kappa_hyper_plus_kit.id, stub_lot.id,
plate.id])
instance = cls(TRN.execute_fetchlast())
# Get the primer set for the plates
sql = """SELECT DISTINCT shotgun_primer_set_id
FROM qiita.shotgun_combo_primer_set cps
JOIN qiita.primer_set_composition psc
ON cps.i5_primer_set_composition_id =
psc.primer_set_composition_id
JOIN qiita.primer_composition pc USING
(primer_set_composition_id)
JOIN qiita.composition c
ON pc.composition_id = c.composition_id
JOIN qiita.well USING (container_id)
WHERE plate_id = %s"""
TRN.add(sql, [i5_plate.id])
primer_set = composition_module.ShotgunPrimerSet(
TRN.execute_fetchlast())
# Get a list of wells that actually contain information
wells = [well for well in chain.from_iterable(plate.layout)
if well is not None]
# Get the list of index pairs to use
idx_combos = primer_set.get_next_combos(len(wells))
i5_layout = i5_plate.layout
i7_layout = i7_plate.layout
# Create the library plate
lib_plate = plate_module.Plate.create(
plate_name, plate.plate_configuration)
for well, idx_combo in zip(wells, idx_combos):
i5_well = idx_combo[0].container
i7_well = idx_combo[1].container
i5_comp = i5_layout[
i5_well.row - 1][i5_well.column - 1].composition
i7_comp = i7_layout[
i7_well.row - 1][i7_well.column - 1].composition
lib_well = container_module.Well.create(
lib_plate, instance, volume, well.row, well.column)
composition_module.LibraryPrepShotgunComposition.create(
instance, lib_well, volume, well.composition,
i5_comp, i7_comp)
return instance
@property
def kappa_hyper_plus_kit(self):
"""The Kappa Hyper plus kit used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('kappa_hyper_plus_kit_id'))
@property
def stub_lot(self):
"""The stub lot used
Returns
-------
ReagentComposition
"""
return composition_module.ReagentComposition(
self._get_attr('stub_lot_id'))
@property
def normalization_process(self):
"""The normalization process used
Returns
-------
NormalizationProcess
"""
return NormalizationProcess(self._get_attr('normalization_process_id'))
@property
def normalized_plate(self):
"""The input normalized plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition lc
JOIN qiita.library_prep_shotgun_composition lpsc
ON lc.composition_id = lpsc.composition_id
JOIN qiita.normalized_gdna_composition ngdnac
USING (normalized_gdna_composition_id)
JOIN qiita.composition nc
ON ngdnac.composition_id = nc.composition_id
JOIN qiita.well w ON nc.container_id = w.container_id
WHERE lc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def i5_primer_plate(self):
"""The i5 primer plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition lc
JOIN qiita.library_prep_shotgun_composition lsc
ON lc.composition_id = lsc.composition_id
JOIN qiita.primer_composition prc
ON lsc.i5_primer_composition_id =
prc.primer_composition_id
JOIN qiita.composition pc
ON prc.composition_id = pc.composition_id
JOIN qiita.well w ON pc.container_id = w.container_id
WHERE lc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def i7_primer_plate(self):
"""The i7 primer plate
Returns
-------
Plate
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT plate_id
FROM qiita.composition lc
JOIN qiita.library_prep_shotgun_composition lsc
ON lc.composition_id = lsc.composition_id
JOIN qiita.primer_composition prc
ON lsc.i7_primer_composition_id =
prc.primer_composition_id
JOIN qiita.composition pc
ON prc.composition_id = pc.composition_id
JOIN qiita.well w ON pc.container_id = w.container_id
WHERE lc.upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return plate_module.Plate(TRN.execute_fetchlast())
@property
def volume(self):
"""The volume
Returns
-------
float
"""
with sql_connection.TRN as TRN:
sql = """SELECT DISTINCT total_volume
FROM qiita.composition
WHERE upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return TRN.execute_fetchlast()
@staticmethod
def _format_picklist(sample_names, sample_wells, indices, i5_vol=250,
i7_vol=250, i5_plate_type='384LDV_AQ_B2_HT',
i7_plate_type='384LDV_AQ_B2_HT',
dest_plate_name='IndexPCRPlate'):
"""Formats Echo-format pick list for preparing the shotgun library
Parameters
----------
sample_names: array-like of str
The sample names matching index order of indices
sample_wells: array-like of str
The wells matching sample name order
indices: pandas DataFrame
The dataframe with index info matching sample_names
i5_vol: int, optional
The volume of i5 index to transfer. Default: 250
i7_vol: int, optional
The volume of i7 index to transfer. Default: 250
i5_plate_type: str, optional
The i5 plate type. Default: 384LDV_AQ_B2_HT
i7_plate_type: str, optional
The i7 plate type. Default: 384LDV_AQ_B2_HT
dest_plate_name: str, optional
The name of the destination plate. Default: IndexPCRPlate
Returns
-------
str
The Echo formatted pick list
"""
# check that arrays are the right size
        if not (len(sample_names) == len(sample_wells) == len(indices)):
raise ValueError(
'sample_names (%s) has a size different from sample_wells '
'(%s) or index list (%s)'
% (len(sample_names), len(sample_wells), len(indices)))
# header
picklist = [
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t'
'Transfer Volume\tIndex Name\tIndex Sequence\t'
'Destination Plate Name\tDestination Well']
# i5 additions
for i, (sample, well) in enumerate(zip(sample_names, sample_wells)):
picklist.append('\t'.join([
str(sample), indices.iloc[i]['i5 plate'], i5_plate_type,
indices.iloc[i]['i5 well'], str(i5_vol),
indices.iloc[i]['i5 name'], indices.iloc[i]['i5 sequence'],
dest_plate_name, well]))
# i7 additions
for i, (sample, well) in enumerate(zip(sample_names, sample_wells)):
picklist.append('\t'.join([
str(sample), indices.iloc[i]['i7 plate'], i7_plate_type,
indices.iloc[i]['i7 well'], str(i7_vol),
indices.iloc[i]['i7 name'], indices.iloc[i]['i7 sequence'],
dest_plate_name, well]))
return '\n'.join(picklist)
def generate_echo_picklist(self):
"""Generates Echo pick list for preparing the shotgun library
Returns
-------
str
The echo-formatted pick list
"""
sample_names = []
sample_wells = []
indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},
'i5 well': {}, 'i7 name': {}, 'i7 plate': {},
'i7 sequence': {}, 'i7 well': {}, 'index combo': {},
'index combo seq': {}}
for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):
if well is None:
continue
# Add the sample well
sample_wells.append(well.well_id)
# Get the sample name - we need to go back to the SampleComposition
lib_comp = well.composition
sample_comp = lib_comp.normalized_gdna_composition\
.compressed_gdna_composition.gdna_composition\
.sample_composition
sample_names.append(sample_comp.content)
# Retrieve all the information about the indices
i5_comp = lib_comp.i5_composition.primer_set_composition
i5_well = i5_comp.container
indices['i5 name'][idx] = i5_comp.external_id
indices['i5 plate'][idx] = i5_well.plate.external_id
indices['i5 sequence'][idx] = i5_comp.barcode
indices['i5 well'][idx] = i5_well.well_id
i7_comp = lib_comp.i7_composition.primer_set_composition
i7_well = i7_comp.container
indices['i7 name'][idx] = i7_comp.external_id
indices['i7 plate'][idx] = i7_well.plate.external_id
indices['i7 sequence'][idx] = i7_comp.barcode
indices['i7 well'][idx] = i7_well.well_id
indices['index combo seq'][idx] = '%s%s' % (
indices['i5 sequence'][idx], indices['i7 sequence'][idx])
sample_names = np.asarray(sample_names)
sample_wells = np.asarray(sample_wells)
indices = pd.DataFrame(indices)
return LibraryPrepShotgunProcess._format_picklist(
sample_names, sample_wells, indices)
class QuantificationProcess(Process):
"""Quantification process object
Attributes
----------
concentrations
See Also
--------
Process
"""
_table = 'qiita.quantification_process'
_id_column = 'quantification_process_id'
_process_type = 'quantification'
@staticmethod
def _compute_shotgun_pico_concentration(dna_vals, size=500):
"""Computes molar concentration of libraries from library DNA
concentration values.
Parameters
----------
dna_vals : numpy array of float
The DNA concentration in ng/uL
size : int
The average library molecule size in bp
Returns
-------
np.array of floats
Array of calculated concentrations, in nanomolar units
"""
lib_concentration = (dna_vals / (660 * float(size))) * 10**6
return lib_concentration
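    # Worked example (hypothetical value): a 2 ng/uL library with the default
    # 500 bp average size corresponds to 2 / (660 * 500) * 10**6 ~= 6.06 nM,
    # where 660 g/mol is the average molar mass of a DNA base pair:
    #
    #     QuantificationProcess._compute_shotgun_pico_concentration(
    #         np.array([2.0]))
    #     # -> array([6.06...])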
@staticmethod
def _make_2D_array(df, data_col='Sample DNA Concentration',
well_col='Well', rows=8, cols=12):
"""Pulls a column of data out of a dataframe and puts into array format
based on well IDs in another column
Parameters
----------
df: Pandas DataFrame
dataframe from which to pull values
data_col: str, optional
name of column with data. Default: Sample DNA Concentration
well_col: str, optional
name of column with well IDs, in 'A1,B12' format. Default: Well
rows: int, optional
number of rows in array to return. Default: 8
cols: int, optional
number of cols in array to return. Default: 12
Returns
-------
numpy 2D array
"""
# initialize empty Cp array
cp_array = np.empty((rows, cols), dtype=object)
# fill Cp array with the post-cleaned values from the right half of the
# plate
for record in df.iterrows():
row = ord(str.upper(record[1][well_col][0])) - ord('A')
col = int(record[1][well_col][1:]) - 1
cp_array[row, col] = record[1][data_col]
return cp_array
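    # Illustrative sketch (hypothetical dataframe): wells 'A1' and 'B12' land
    # at positions [0, 0] and [1, 11] of the returned array; wells without a
    # record stay as None:
    #
    #     df = pd.DataFrame({'Well': ['A1', 'B12'],
    #                        'Sample DNA Concentration': [1.5, 2.0]})
    #     QuantificationProcess._make_2D_array(df)[0, 0]   # -> 1.5
    #     QuantificationProcess._make_2D_array(df)[1, 11]  # -> 2.0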
@staticmethod
def _parse_pico_csv(contents, sep='\t',
conc_col_name='Sample DNA Concentration'):
"""Reads tab-delimited pico quant
Parameters
----------
contents: fp or open filehandle
pico quant file
sep: str
sep char used in quant file
conc_col_name: str
name to use for concentration column output
Returns
-------
pico_df: pandas DataFrame object
DataFrame relating well location and DNA concentration
"""
raw_df = pd.read_csv(contents, sep=sep, skiprows=2, skipfooter=5,
engine='python')
pico_df = raw_df[['Well', '[Concentration]']]
pico_df = pico_df.rename(columns={'[Concentration]': conc_col_name})
# coerce oddball concentrations to np.nan
pico_df[conc_col_name] = pd.to_numeric(pico_df[conc_col_name],
errors='coerce')
return pico_df
@staticmethod
def parse(contents, file_format="minipico", rows=8, cols=12):
"""Parses the quantification output
Parameters
----------
contents : str
The contents of the plate reader output
file_format: str
The quantification file format
rows: int, optional
The number of rows in the plate. Default: 8
cols: int, optional
The number of cols in the plate. Default: 12
Returns
-------
        np.array of floats
"""
parsers = {'minipico': QuantificationProcess._parse_pico_csv}
contents_io = StringIO(contents)
if file_format not in parsers:
raise ValueError(
'File format %s not recognized. Supported file formats: %s'
% (file_format, ', '.join(parsers)))
df = parsers[file_format](contents_io)
array = QuantificationProcess._make_2D_array(df, rows=rows, cols=cols)
return array.astype(float)
@classmethod
def create_manual(cls, user, quantifications):
"""Creates a new manual quantification process
Parameters
----------
user: labman.db.user.User
User performing the quantification process
quantifications: list of dict
The quantifications in the form of {'composition': Composition,
            'concentration': float}
Returns
-------
QuantificationProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the quantification process table
sql = """INSERT INTO qiita.quantification_process (process_id)
VALUES (%s) RETURNING quantification_process_id"""
TRN.add(sql, [process_id])
instance = cls(TRN.execute_fetchlast())
sql = """INSERT INTO qiita.concentration_calculation
(quantitated_composition_id, upstream_process_id,
raw_concentration)
VALUES (%s, %s, %s)"""
sql_args = []
for quant in quantifications:
sql_args.append([quant['composition'].composition_id,
instance.id, quant['concentration']])
TRN.add(sql, sql_args, many=True)
TRN.execute()
return instance
@classmethod
def create(cls, user, plate, concentrations):
"""Creates a new quantification process
Parameters
----------
user: labman.db.user.User
User performing the quantification process
plate: labman.db.plate.Plate
The plate being quantified
concentrations: 2D np.array
The plate concentrations
Returns
-------
QuantificationProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the quantification process table
sql = """INSERT INTO qiita.quantification_process (process_id)
VALUES (%s) RETURNING quantification_process_id"""
TRN.add(sql, [process_id])
instance = cls(TRN.execute_fetchlast())
sql = """INSERT INTO qiita.concentration_calculation
(quantitated_composition_id, upstream_process_id,
raw_concentration)
VALUES (%s, %s, %s)"""
sql_args = []
layout = plate.layout
for p_row, c_row in zip(layout, concentrations):
for well, conc in zip(p_row, c_row):
if well is not None:
sql_args.append([well.composition.composition_id,
instance.id, conc])
if len(sql_args) == 0:
raise ValueError('No concentration values have been provided')
TRN.add(sql, sql_args, many=True)
TRN.execute()
return instance
@property
def concentrations(self):
"""The concentrations measured
Returns
-------
list of (Composition, float, float)
"""
with sql_connection.TRN as TRN:
sql = """SELECT quantitated_composition_id, raw_concentration,
computed_concentration
FROM qiita.concentration_calculation
WHERE upstream_process_id = %s
ORDER BY concentration_calculation_id"""
TRN.add(sql, [self._id])
return [
(composition_module.Composition.factory(comp_id), r_con, c_con)
for comp_id, r_con, c_con in TRN.execute_fetchindex()]
def compute_concentrations(self, dna_amount=240, min_val=1, max_val=15,
blank_volume=2, size=500):
"""Compute the normalized concentrations
Parameters
----------
dna_amount: float, optional
(Amplicon) Total amount of DNA, in ng. Default: 240
min_val: float, optional
(Amplicon) Minimum amount of DNA to normalize to (nM). Default: 1
max_val: float, optional
(Amplicon) Maximum value. Wells above this number will be
excluded (nM). Default: 15
blank_volume: float, optional
(Amplicon) Amount to pool for the blanks (nM). Default: 2.
size: int, optional
(Shotgun) The average library molecule size, in bp.
"""
concentrations = self.concentrations
layout = concentrations[0][0].container.plate.layout
res = None
if isinstance(concentrations[0][0],
composition_module.LibraryPrep16SComposition):
# Amplicon
sample_concs = np.zeros_like(layout, dtype=float)
is_blank = np.zeros_like(layout, dtype=bool)
for comp, r_conc, _ in concentrations:
well = comp.container
row = well.row - 1
col = well.column - 1
sample_concs[row][col] = r_conc
sc = comp.gdna_composition.sample_composition
is_blank[row][col] = sc.sample_composition_type == 'blank'
res = QuantificationProcess._compute_amplicon_pool_values(
sample_concs, dna_amount)
res[sample_concs < min_val] = min_val
            # If there is any sample whose concentration is above the
            # user-defined max_val, the decision is to not pool that sample.
            # To achieve that, set its volume to 0 so it will not get pooled.
res[sample_concs > max_val] = 0
res[is_blank] = blank_volume
elif isinstance(concentrations[0][0],
composition_module.LibraryPrepShotgunComposition):
# Shotgun
sample_concs = np.zeros_like(layout, dtype=float)
for comp, r_conc, _ in concentrations:
well = comp.container
row = well.row - 1
col = well.column - 1
sample_concs[row][col] = r_conc
res = QuantificationProcess._compute_shotgun_pico_concentration(
sample_concs, size)
# No need for else, because if it is not one of the above types
# we don't need to do anything
if res is not None:
sql_args = []
for p_row, c_row in zip(layout, res):
for well, conc in zip(p_row, c_row):
if well is not None:
sql_args.append([conc, self.id,
well.composition.composition_id])
sql = """UPDATE qiita.concentration_calculation
SET computed_concentration = %s
WHERE upstream_process_id = %s AND
quantitated_composition_id = %s"""
with sql_connection.TRN as TRN:
TRN.add(sql, sql_args, many=True)
TRN.execute()
@staticmethod
def _compute_amplicon_pool_values(sample_concs, dna_amount=240):
"""Computes amplicon pooling values
Parameters
----------
sample_concs: 2D array of float
nM sample concentrations
dna_amount: float, optional
Total amount of DNA, in ng. Default: 240
Returns
-------
np.array of floats
A 2D array of floats
"""
return float(dna_amount) / sample_concs
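    # Worked example (hypothetical concentrations): with the default
    # dna_amount of 240, a well quantified at 12 receives 240 / 12 = 20 and a
    # well at 24 receives 10, so more concentrated wells contribute
    # proportionally smaller values:
    #
    #     QuantificationProcess._compute_amplicon_pool_values(
    #         np.array([[12., 24.]]))
    #     # -> array([[20., 10.]])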
class PoolingProcess(Process):
"""Pooling process object
Attributes
----------
quantification_process
robot
See Also
--------
Process
"""
_table = 'qiita.pooling_process'
_id_column = 'pooling_process_id'
_process_type = 'pooling'
@staticmethod
def estimate_pool_conc_vol(sample_vols, sample_concs):
"""Estimates the molarity and volume of a pool.
Parameters
----------
sample_concs : numpy array of float
The concentrations calculated via PicoGreen (nM)
sample_vols : numpy array of float
The calculated pooling volumes (nL)
Returns
-------
pool_conc : float
The estimated actual concentration of the pool, in nM
total_vol : float
The total volume of the pool, in nL
"""
# scalar to adjust nL to L for molarity calculations
nl_scalar = 1e-9
# calc total pool pmols
total_pmols = np.multiply(sample_concs, sample_vols) * nl_scalar
# calc total pool vol in nanoliters
total_vol = sample_vols.sum()
# pool pM is total pmols divided by total liters
# (total vol in nL * 1 L / 10^9 nL)
pool_conc = total_pmols.sum() / (total_vol * nl_scalar)
return (pool_conc, total_vol)
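    # Worked example (hypothetical values): pooling 1000 nL at 10 nM with
    # 500 nL at 20 nM gives 1500 nL total at the volume-weighted mean
    # concentration (10 * 1000 + 20 * 500) / 1500 ~= 13.33 nM:
    #
    #     PoolingProcess.estimate_pool_conc_vol(
    #         np.array([1000., 500.]), np.array([10., 20.]))
    #     # -> (13.33..., 1500.0)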
@staticmethod
def compute_shotgun_pooling_values_eqvol(sample_concs, total_vol=60.0):
"""Computes molar concentration of libraries from concentration values,
using an even volume per sample
Parameters
----------
sample_concs : numpy array of float
The concentrations calculated via PicoGreen (nM)
total_vol : float, optional
The total volume to pool (uL). Default: 60
Returns
-------
np.array of floats
A 2D array of floats
"""
per_sample_vol = (total_vol / sample_concs.size) * 1000.0
sample_vols = np.zeros(sample_concs.shape) + per_sample_vol
return sample_vols
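    # Worked example: with the default 60 uL total volume and a full 384-well
    # concentration array, every sample is assigned 60 / 384 * 1000 = 156.25
    # nL regardless of its concentration:
    #
    #     concs = np.ones((16, 24))
    #     PoolingProcess.compute_shotgun_pooling_values_eqvol(concs)[0, 0]
    #     # -> 156.25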
@staticmethod
def compute_shotgun_pooling_values_minvol(
sample_concs, sample_fracs=None, floor_vol=100, floor_conc=40,
total_nmol=.01):
"""Computes pooling volumes for samples based on concentration
estimates of nM concentrations (`sample_concs`), taking a minimum
volume of samples below a threshold.
Reads in concentration values in nM. Samples below a minimum
concentration (`floor_conc`, default 40 nM) will be included, but at a
decreased volume (`floor_vol`, default 100 nL) to avoid overdiluting
the pool.
Samples can be assigned a target molar fraction in the pool by passing
a np.array (`sample_fracs`, same shape as `sample_concs`) with
fractional values per sample. By default, will aim for equal molar
pooling.
Finally, total pooling size is determined by a target nanomolar
quantity (`total_nmol`, default .01). For a perfect 384 sample library,
in which you had all samples at a concentration of exactly 400 nM and
wanted a total volume of 60 uL, this would be 0.024 nmol.
For a Novaseq, we expect to need 150 uL at 4 nM, or about 0.0006 nmol.
Taking into account sample loss on the pippin prep (1/2) and molar loss
due to exclusion of primer dimers (1/2), figure we need 4 times that or
0.0024.
Parameters
----------
sample_concs: 2D array of float
nM sample concentrations
sample_fracs: 2D of float, optional
fractional value for each sample (default 1/N)
floor_vol: float, optional
volume (nL) at which samples below floor_conc will be pooled.
Default: 100
floor_conc: float, optional
minimum value (nM) for pooling at real estimated value. Default: 40
total_nmol : float, optional
            total amount, in nmol, to have in the pool. Default: 0.01
Returns
-------
sample_vols: np.array of floats
the volumes in nL per each sample pooled
"""
if sample_fracs is None:
sample_fracs = np.ones(sample_concs.shape) / sample_concs.size
# calculate volumetric fractions including floor val
sample_vols = (total_nmol * sample_fracs) / sample_concs
# convert L to nL
sample_vols *= 10**9
# drop volumes for samples below floor concentration to floor_vol
sample_vols[sample_concs < floor_conc] = floor_vol
return sample_vols
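    # Worked example (hypothetical concentrations, default parameters, two
    # samples so sample_fracs defaults to 0.5 each): a 200 nM sample receives
    # 0.01 * 0.5 / 200 * 10**9 = 25000 nL, while a 20 nM sample falls below
    # floor_conc and is pooled at floor_vol instead:
    #
    #     PoolingProcess.compute_shotgun_pooling_values_minvol(
    #         np.array([[200., 20.]]))
    #     # -> array([[25000., 100.]])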
@staticmethod
def compute_shotgun_pooling_values_floor(
sample_concs, sample_fracs=None, min_conc=10, floor_conc=50,
total_nmol=.01):
"""Computes pooling volumes for samples based on concentration
estimates of nM concentrations (`sample_concs`).
Reads in concentration values in nM. Samples must be above a minimum
concentration threshold (`min_conc`, default 10 nM) to be included.
Samples above this threshold but below a given floor concentration
(`floor_conc`, default 50 nM) will be pooled as if they were at the
floor concentration, to avoid overdiluting the pool.
Samples can be assigned a target molar fraction in the pool by passing
a np.array (`sample_fracs`, same shape as `sample_concs`) with
fractional values per sample. By default, will aim for equal molar
pooling.
Finally, total pooling size is determined by a target nanomolar
quantity (`total_nmol`, default .01). For a perfect 384 sample library,
in which you had all samples at a concentration of exactly 400 nM and
wanted a total volume of 60 uL, this would be 0.024 nmol.
Parameters
----------
sample_concs: 2D array of float
nM calculated by compute_qpcr_concentration
sample_fracs: 2D of float, optional
fractional value for each sample (default 1/N)
min_conc: float, optional
minimum nM concentration to be included in pool. Default: 10
floor_conc: float, optional
minimum value for pooling for samples above min_conc. Default: 50
total_nmol : float, optional
            total amount, in nmol, to have in the pool. Default: 0.01
Returns
-------
sample_vols: np.array of floats
the volumes in nL per each sample pooled
"""
if sample_fracs is None:
sample_fracs = np.ones(sample_concs.shape) / sample_concs.size
# get samples above threshold
sample_fracs_pass = sample_fracs.copy()
sample_fracs_pass[sample_concs <= min_conc] = 0
# renormalize to exclude lost samples
sample_fracs_pass *= 1/sample_fracs_pass.sum()
# floor concentration value
sample_concs_floor = sample_concs.copy()
sample_concs_floor[sample_concs < floor_conc] = floor_conc
# calculate volumetric fractions including floor val
sample_vols = (total_nmol * sample_fracs_pass) / sample_concs_floor
# convert L to nL
sample_vols *= 10**9
return sample_vols
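    # Worked example (hypothetical concentrations, default parameters): the
    # 5 nM sample is excluded (below min_conc), the 30 nM sample is pooled as
    # if it were at floor_conc = 50 nM, and the fractions are renormalized
    # over the two remaining samples (0.5 each):
    #
    #     PoolingProcess.compute_shotgun_pooling_values_floor(
    #         np.array([[200., 30., 5.]]))
    #     # -> array([[25000., 100000., 0.]])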
@classmethod
def create(cls, user, quantification_process, pool_name, volume,
input_compositions, func_data, robot=None, destination=None):
"""Creates a new pooling process
Parameters
----------
user: labman.db.user.User
User performing the pooling process
quantification_process: labman.db.process.QuantificationProcess
The quantification process this pooling is based on
pool_name: str
The name of the new pool
volume: float
The initial volume
input_compositions: list of dicts
The input compositions for the pool {'composition': Composition,
'input_volume': float, 'percentage_of_output': float}
func_data : dict
Dictionary with the pooling function information
robot: labman.equipment.Equipment, optional
The robot performing the pooling, if not manual
destination: str
The EpMotion destination tube
Returns
-------
PoolingProcess
"""
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
# Add the row to the pooling process table
sql = """INSERT INTO qiita.pooling_process
(process_id, quantification_process_id, robot_id,
destination, pooling_function_data)
VALUES (%s, %s, %s, %s, %s)
RETURNING pooling_process_id"""
r_id = robot.id if robot is not None else None
if r_id is None:
destination = None
TRN.add(sql, [process_id, quantification_process.id, r_id,
destination, dumps(func_data)])
instance = cls(TRN.execute_fetchlast())
# Create the new pool
tube = container_module.Tube.create(instance, pool_name, volume)
pool = composition_module.PoolComposition.create(
instance, tube, volume)
# Link the pool with its contents
sql = """INSERT INTO qiita.pool_composition_components
(output_pool_composition_id, input_composition_id,
input_volume, percentage_of_output)
VALUES (%s, %s, %s, %s)"""
sql_args = []
for in_comp in input_compositions:
# The wet lab pointed out that we don't need to pool the ones
# that have a value below 0.001
if in_comp['input_volume'] < 0.001:
continue
sql_args.append([pool.id,
in_comp['composition'].composition_id,
in_comp['input_volume'],
in_comp['percentage_of_output']])
TRN.add(sql, sql_args, many=True)
TRN.execute()
return instance
@property
def quantification_process(self):
"""The quantification process used
Returns
-------
QuantificationProcess
"""
return QuantificationProcess(
self._get_attr('quantification_process_id'))
@property
def robot(self):
"""The robot used
Returns
-------
Equipment
"""
return equipment_module.Equipment(self._get_attr('robot_id'))
@property
def destination(self):
"""The EpMotion destination tube
Returns
-------
str
"""
return self._get_attr('destination')
@property
def components(self):
"""The components of the pool
Returns
-------
list of (Composition, float)
"""
with sql_connection.TRN as TRN:
sql = """SELECT input_composition_id, input_volume
FROM qiita.pool_composition_components
JOIN qiita.pool_composition
ON output_pool_composition_id = pool_composition_id
JOIN qiita.composition USING (composition_id)
WHERE upstream_process_id = %s
ORDER BY pool_composition_components_id"""
TRN.add(sql, [self.process_id])
return [(composition_module.Composition.factory(comp_id), vol)
for comp_id, vol in TRN.execute_fetchindex()]
@property
def pool(self):
"""The generated pool composition
Returns
-------
PoolComposition
"""
with sql_connection.TRN as TRN:
sql = """SELECT composition_id
FROM qiita.composition
WHERE upstream_process_id = %s"""
TRN.add(sql, [self.process_id])
return composition_module.Composition.factory(
TRN.execute_fetchlast())
@property
def pooling_function_data(self):
"""The information about the pooling process
Returns
-------
dict
"""
return self._get_attr('pooling_function_data')
@staticmethod
def _format_picklist(vol_sample, max_vol_per_well=60000,
dest_plate_shape=None):
"""Format the contents of an echo pooling pick list
Parameters
----------
vol_sample : 2d numpy array of floats
The per well sample volume, in nL
        max_vol_per_well : float, optional
Maximum destination well volume, in nL. Default: 60000
dest_plate_shape: list of 2 elements
The destination plate shape
"""
if dest_plate_shape is None:
dest_plate_shape = [16, 24]
contents = ['Source Plate Name,Source Plate Type,Source Well,'
'Concentration,Transfer Volume,Destination Plate Name,'
'Destination Well']
# Write the sample transfer volumes
rows, cols = vol_sample.shape
# replace NaN values with 0s to leave a trail of unpooled wells
pool_vols = np.nan_to_num(vol_sample)
running_tot = 0
d = 1
for i in range(rows):
for j in range(cols):
well_name = "%s%d" % (chr(ord('A') + i), j+1)
# Machine will round, so just give it enough info to do the
# correct rounding.
val = "%.2f" % pool_vols[i][j]
# test to see if we will exceed total vol per well
if running_tot + pool_vols[i][j] > max_vol_per_well:
d += 1
running_tot = pool_vols[i][j]
else:
running_tot += pool_vols[i][j]
dest = "%s%d" % (chr(ord('A') +
int(np.floor(d/dest_plate_shape[0]))),
(d % dest_plate_shape[1]))
contents.append(",".join(['1', '384LDV_AQ_B2_HT', well_name,
"", val, 'NormalizedDNA', dest]))
return "\n".join(contents)
def generate_echo_picklist(self, max_vol_per_well=30000):
"""Generates Echo pick list for pooling the shotgun library
Parameters
----------
        max_vol_per_well : float, optional
Maximum destination well volume, in nL. Default: 30000
Returns
-------
str
The echo-formatted pick list
"""
vol_sample = np.zeros((16, 24))
for comp, vol in self.components:
well = comp.container
vol_sample[well.row - 1][well.column - 1] = vol
return PoolingProcess._format_picklist(vol_sample)
def generate_epmotion_file(self):
"""Generates an EpMotion file to perform the pooling
Returns
-------
str
The EpMotion-formatted pool file contents
"""
contents = ['Rack,Source,Rack,Destination,Volume,Tool']
destination = self.destination
for comp, vol in self.components:
source = comp.container.well_id
val = "%.3f" % vol
# Hard-coded values - never changes according to the wet lab
contents.append(
",".join(['1', source, '1', destination, val, '1']))
return "\n".join(contents)
def generate_pool_file(self):
"""Generates the correct pool file based on the pool contents
Returns
-------
str
The contents of the pool file
"""
comp = self.components[0][0]
if isinstance(comp, composition_module.LibraryPrep16SComposition):
return self.generate_epmotion_file()
elif isinstance(comp,
composition_module.LibraryPrepShotgunComposition):
return self.generate_echo_picklist()
else:
# This error should only be shown to programmers
raise ValueError(
"Can't generate a pooling file for a pool containing "
"compositions of type: %s" % comp.__class__.__name__)
class SequencingProcess(Process):
"""Sequencing process object
Attributes
----------
See Also
--------
Process
"""
_table = 'qiita.sequencing_process'
_id_column = 'sequencing_process_id'
_process_type = 'sequencing'
sequencer_lanes = {
'HiSeq4000': 8, 'HiSeq3000': 8, 'HiSeq2500': 2, 'HiSeq1500': 2,
'MiSeq': 1, 'MiniSeq': 1, 'NextSeq': 1, 'NovaSeq': 1}
@staticmethod
def list_sequencing_runs():
"""Generates a list of sequencing runs
Returns
-------
list of dicts
The list of sequence run information with the structure:
[{'process_id': int, 'run_name': string, ...}]
"""
with sql_connection.TRN as TRN:
sql = """SELECT *
FROM qiita.sequencing_process
ORDER BY process_id"""
TRN.add(sql)
return [dict(r) for r in TRN.execute_fetchindex()]
@classmethod
def create(cls, user, pools, run_name, experiment, sequencer,
fwd_cycles, rev_cycles, principal_investigator,
contacts=None):
"""Creates a new sequencing process
Parameters
----------
user : labman.db.user.User
User preparing the sequencing
pools: list of labman.db.composition.PoolComposition
The pools being sequenced, in lane order
run_name: str
The run name
experiment: str
The run experiment
sequencer: labman.db.equipment.Equipment
The sequencer used
fwd_cycles : int
The number of forward cycles
rev_cycles : int
The number of reverse cycles
principal_investigator : labman.db.user.User
The principal investigator to list in the run
        contacts: list of labman.db.user.User, optional
Any additional contacts to add to the Sample Sheet
Returns
-------
SequencingProcess
Raises
------
ValueError
            If fwd_cycles or rev_cycles is <= 0
"""
        if not isinstance(fwd_cycles, int) or fwd_cycles <= 0:
            raise ValueError("fwd_cycles must be > 0")
        if not isinstance(rev_cycles, int) or rev_cycles <= 0:
            raise ValueError("rev_cycles must be > 0")
if len(pools) > cls.sequencer_lanes[sequencer.equipment_type]:
raise ValueError(
'Number of pools cannot be bigger than the number of lanes '
'in the sequencer. Pools: %s. Lanes in a %s sequencer: %s'
% (len(pools), sequencer.equipment_type,
cls.sequencer_lanes[sequencer.equipment_type]))
with sql_connection.TRN as TRN:
# Add the row to the process table
process_id = cls._common_creation_steps(user)
assay = None
pool = pools[0]
CM = composition_module
while assay is None:
comp = pool.components[0]['composition']
if isinstance(comp, CM.LibraryPrep16SComposition):
assay = 'Amplicon'
elif isinstance(comp, CM.LibraryPrepShotgunComposition):
assay = 'Metagenomics'
elif isinstance(comp, CM.PoolComposition):
pool = comp
else:
# This should never happen - i.e. there is no way
# of creating a pool like that
raise ValueError(
'Pool with unexpected composition type: %s'
% comp.__class__.__name__)
# Add the row to the sequencing table
sql = """INSERT INTO qiita.sequencing_process
(process_id, run_name, experiment, sequencer_id,
fwd_cycles, rev_cycles, assay, principal_investigator)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
RETURNING sequencing_process_id"""
TRN.add(sql, [process_id, run_name, experiment, sequencer.id,
fwd_cycles, rev_cycles, assay,
principal_investigator.id])
instance = cls(TRN.execute_fetchlast())
sql = """INSERT INTO qiita.sequencing_process_lanes
(sequencing_process_id, pool_composition_id,
lane_number)
VALUES (%s, %s, %s)"""
sql_args = [[instance.id, p.id, i + 1]
for i, p in enumerate(pools)]
TRN.add(sql, sql_args, many=True)
if contacts:
sql = """INSERT INTO qiita.sequencing_process_contacts
(sequencing_process_id, contact_id)
VALUES (%s, %s)"""
sql_args = [[instance.id, c.id] for c in contacts]
TRN.add(sql, sql_args, many=True)
TRN.execute()
return instance
@property
def pools(self):
with sql_connection.TRN as TRN:
sql = """SELECT pool_composition_id, lane_number
FROM qiita.sequencing_process_lanes
WHERE sequencing_process_id = %s
ORDER BY lane_number"""
TRN.add(sql, [self.id])
res = [[composition_module.PoolComposition(p), l]
for p, l in TRN.execute_fetchindex()]
return res
@property
def run_name(self):
return self._get_attr('run_name')
@property
def experiment(self):
return self._get_attr('experiment')
@property
def sequencer(self):
return equipment_module.Equipment(self._get_attr('sequencer_id'))
@property
def fwd_cycles(self):
return self._get_attr('fwd_cycles')
@property
def rev_cycles(self):
return self._get_attr('rev_cycles')
@property
def assay(self):
return self._get_attr('assay')
@property
def principal_investigator(self):
return user_module.User(self._get_attr('principal_investigator'))
@property
def contacts(self):
with sql_connection.TRN as TRN:
sql = """SELECT contact_id
FROM qiita.sequencing_process_contacts
WHERE sequencing_process_id = %s
ORDER BY contact_id"""
TRN.add(sql, [self.id])
return [user_module.User(r[0]) for r in TRN.execute_fetchindex()]
@staticmethod
def _bcl_scrub_name(name):
"""Modifies a sample name to be BCL2fastq compatible
Parameters
----------
name : str
the sample name
Returns
-------
str
the sample name, formatted for bcl2fastq
"""
return re.sub('[^0-9a-zA-Z\-\_]+', '_', name)
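    # Example: any run of characters outside [0-9a-zA-Z-_] collapses to a
    # single underscore:
    #
    #     SequencingProcess._bcl_scrub_name('1.SKB1 test')  # -> '1_SKB1_test'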
@staticmethod
def _reverse_complement(seq):
"""Reverse-complement a sequence
From http://stackoverflow.com/a/25189185/7146785
Parameters
----------
seq : str
The sequence to reverse-complement
Returns
-------
str
The reverse-complemented sequence
"""
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
rev_seq = "".join(complement.get(base, base) for base in reversed(seq))
return rev_seq
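    # Example: reversing 'ACCGT' gives 'TGCCA', and complementing each base
    # then yields the reverse complement:
    #
    #     SequencingProcess._reverse_complement('ACCGT')  # -> 'ACGGT'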
@staticmethod
def _sequencer_i5_index(sequencer, indices):
"""Decides if the indices should be reversed based on the sequencer
"""
revcomp_sequencers = ['HiSeq4000', 'MiniSeq', 'NextSeq', 'HiSeq3000']
other_sequencers = ['HiSeq2500', 'HiSeq1500', 'MiSeq', 'NovaSeq']
if sequencer in revcomp_sequencers:
return([SequencingProcess._reverse_complement(x) for x in indices])
elif sequencer in other_sequencers:
return(indices)
else:
raise ValueError(
                'Your indicated sequencer [%s] is not recognized.\n'
                'Recognized sequencers are: %s' %
                (sequencer, ' '.join(revcomp_sequencers + other_sequencers)))
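    # Example: sequencers in revcomp_sequencers get the reverse complement of
    # the supplied i5 indices, the rest are passed through unchanged:
    #
    #     SequencingProcess._sequencer_i5_index('HiSeq4000', ['ACCGT'])
    #     # -> ['ACGGT']
    #     SequencingProcess._sequencer_i5_index('MiSeq', ['ACCGT'])
    #     # -> ['ACCGT']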
@staticmethod
def _format_sample_sheet_data(sample_ids, i7_name, i7_seq, i5_name, i5_seq,
wells=None, sample_plates=None,
sample_proj='', description=None, lanes=[1],
sep=',', include_header=True):
"""Creates the [Data] component of the Illumina sample sheet
Parameters
----------
sample_ids: array-like
The bcl2fastq-compatible sample ids
i7_name: array-like
The i7 index name, in sample_ids order
i7_seq: array-like
The i7 sequences, in sample_ids order
i5_name: array-like
The i5 index name, in sample_ids order
i5_seq: array-like
The i5 sequences, in sample_ids order
wells: array-like, optional
The source sample wells, in sample_ids order. Default: None
        sample_plates: array-like, optional
            The sample plate names, in sample_ids order. Default: None
sample_proj: str, optional
The project name. Default: ''
description: array-like, optional
The original sample ids, in sample_ids order. Default: None
        lanes: array-like, optional
The lanes in which the pool will be sequenced. Default: [1]
sep: str, optional
The file-format separator. Default: ','
include_header: bool, optional
            Whether to include the header or not. Default: True
Returns
-------
str
The formatted [Data] component of the Illumina sample sheet
Raises
------
ValueError
If sample_ids, i7_name, i7_seq, i5_name and i5_seq do not have all
the same length
"""
if sample_plates is None:
sample_plates = [''] * len(sample_ids)
if (len(sample_ids) != len(i7_name) != len(i7_seq) !=
len(i5_name) != len(i5_seq) != len(sample_plates)):
raise ValueError('Sample information lengths are not all equal')
if wells is None:
wells = [''] * len(sample_ids)
if description is None:
description = [''] * len(sample_ids)
data = []
for lane in lanes:
for i, sample in enumerate(sample_ids):
line = sep.join([str(lane), sample, sample, sample_plates[i],
wells[i], i7_name[i], i7_seq[i], i5_name[i],
i5_seq[i], sample_proj, description[i]])
data.append(line)
data = sorted(data)
if include_header:
data.insert(0, sep.join([
'Lane', 'Sample_ID', 'Sample_Name', 'Sample_Plate',
'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2',
'Sample_Project', 'Description']))
return '\n'.join(data)
@staticmethod
def _format_sample_sheet_comments(principal_investigator=None,
contacts=None, other=None, sep=','):
"""Formats the sample sheet comments
Parameters
----------
principal_investigator: dict, optional
The principal investigator information: {name: email}
contacts: dict, optional
The contacts information: {name: email}
other: str, optional
Other information to include in the sample sheet comments
sep: str, optional
The sample sheet separator
Returns
-------
str
The formatted comments of the sample sheet
"""
comments = []
if principal_investigator is not None:
comments.append('PI{0}{1}\n'.format(
sep, sep.join(
'{0}{1}{2}'.format(x, sep, principal_investigator[x])
for x in principal_investigator.keys())))
if contacts is not None:
comments.append(
'Contact{0}{1}\nContact emails{0}{2}\n'.format(
sep, sep.join(x for x in sorted(contacts.keys())),
sep.join(contacts[x] for x in sorted(contacts.keys()))))
if other is not None:
comments.append('%s\n' % other)
return ''.join(comments)
@staticmethod
def _format_sample_sheet(sample_sheet_dict, sep=','):
"""Formats Illumina-compatible sample sheet.
Parameters
----------
sample_sheet_dict : dict
dict with the sample sheet information
sep: str, optional
The sample sheet separator
Returns
-------
sample_sheet : str
the sample sheet string
"""
template = (
'{comments}[Header]\nIEMFileVersion{sep}{IEMFileVersion}\n'
'Investigator Name{sep}{Investigator Name}\n'
'Experiment Name{sep}{Experiment Name}\nDate{sep}{Date}\n'
'Workflow{sep}{Workflow}\nApplication{sep}{Application}\n'
'Assay{sep}{Assay}\nDescription{sep}{Description}\n'
'Chemistry{sep}{Chemistry}\n\n[Reads]\n{read1}\n{read2}\n\n'
'[Settings]\nReverseComplement{sep}{ReverseComplement}\n\n'
'[Data]\n{data}')
if sample_sheet_dict['comments']:
sample_sheet_dict['comments'] = re.sub(
'^', '# ', sample_sheet_dict['comments'].rstrip(),
flags=re.MULTILINE) + '\n'
sample_sheet = template.format(**sample_sheet_dict, **{'sep': sep})
return sample_sheet
def _generate_shotgun_sample_sheet(self):
"""Generates Illumina compatible shotgun sample sheets
Returns
-------
str
The illumina-formatted sample sheet
"""
bcl2fastq_sample_ids = []
i7_names = []
i7_sequences = []
i5_names = []
i5_sequences = []
wells = []
sample_ids = []
sample_plates = []
sequencer_type = self.sequencer.equipment_type
data = []
include_header = True
for pool, lane in self.pools:
for component in pool.components:
lp_composition = component['composition']
# Get the well information
well = lp_composition.container
wells.append(well.well_id)
# Get the plate information
sample_plates.append(well.plate.external_id)
# Get the i7 index information
i7_comp = lp_composition.i7_composition.primer_set_composition
i7_names.append(i7_comp.external_id)
i7_sequences.append(i7_comp.barcode)
# Get the i5 index information
i5_comp = lp_composition.i5_composition.primer_set_composition
i5_names.append(i5_comp.external_id)
i5_sequences.append(i5_comp.barcode)
# Get the sample id
sample_id = lp_composition.normalized_gdna_composition.\
compressed_gdna_composition.gdna_composition.\
sample_composition.content
sample_ids.append(sample_id)
            # Transform the sample ids to be bcl2fastq-compatible
bcl2fastq_sample_ids = [
SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids]
# Reverse the i5 sequences if needed based on the sequencer
i5_sequences = SequencingProcess._sequencer_i5_index(
sequencer_type, i5_sequences)
            # add the data of the current pool
data.append(SequencingProcess._format_sample_sheet_data(
bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names,
i5_sequences, wells=wells, sample_plates=sample_plates,
description=sample_ids, sample_proj=self.run_name,
lanes=[lane], sep=',', include_header=include_header))
include_header = False
data = '\n'.join(data)
contacts = {c.name: c.email for c in self.contacts}
pi = self.principal_investigator
principal_investigator = {pi.name: pi.email}
sample_sheet_dict = {
'comments': SequencingProcess._format_sample_sheet_comments(
principal_investigator=principal_investigator,
contacts=contacts),
'IEMFileVersion': '4',
'Investigator Name': pi.name,
'Experiment Name': self.experiment,
'Date': str(self.date),
'Workflow': 'GenerateFASTQ',
'Application': 'FASTQ Only',
'Assay': self.assay,
'Description': '',
'Chemistry': 'Default',
'read1': self.fwd_cycles,
'read2': self.rev_cycles,
'ReverseComplement': '0',
'data': data}
return SequencingProcess._format_sample_sheet(sample_sheet_dict)
def _generate_amplicon_sample_sheet(self):
"""Generates Illumina compatible sample sheets
Returns
-------
str
The illumina-formatted sample sheet
"""
fixed_run_name = SequencingProcess._bcl_scrub_name(self.run_name)
data = (
'Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,'
'index,Sample_Project,Description,,\n'
'%s,,,,,NNNNNNNNNNNN,,,,,' % fixed_run_name)
contacts = {c.name: c.email for c in self.contacts}
pi = self.principal_investigator
principal_investigator = {pi.name: pi.email}
sample_sheet_dict = {
'comments': SequencingProcess._format_sample_sheet_comments(
principal_investigator=principal_investigator,
contacts=contacts),
'IEMFileVersion': '4',
'Investigator Name': pi.name,
'Experiment Name': self.experiment,
'Date': str(self.date),
'Workflow': 'GenerateFASTQ',
'Application': 'FASTQ Only',
'Assay': self.assay,
'Description': '',
'Chemistry': 'Default',
'read1': self.fwd_cycles,
'read2': self.rev_cycles,
'ReverseComplement': '0',
'data': data}
return SequencingProcess._format_sample_sheet(sample_sheet_dict)
def generate_sample_sheet(self):
"""Generates Illumina compatible sample sheets
Returns
-------
str
The illumina-formatted sample sheet
"""
assay = self.assay
if assay == 'Amplicon':
return self._generate_amplicon_sample_sheet()
elif assay == 'Metagenomics':
return self._generate_shotgun_sample_sheet()
def generate_prep_information(self):
"""Generates prep information
Returns
-------
dict labman.db.study.Study: str
a dict of the Study and the prep
"""
assay = self.assay
data = {}
blanks = {}
if assay == 'Amplicon':
extra_fields = [
# 'e'/'r': equipment/reagent
('e', 'lepmotion_robot_id', 'epmotion_robot'),
('e', 'epmotion_tm300_8_tool_id', 'epmotion_tm300_8_tool'),
('e', 'epmotion_tm50_8_tool_id', 'epmotion_tm50_8_tool'),
('e', 'gepmotion_robot_id', 'gdata_robot'),
('e', 'epmotion_tool_id', 'epmotion_tool'),
('e', 'kingfisher_robot_id', 'kingfisher_robot'),
('r', 'extraction_kit_id', 'extraction_kit'),
('r', 'master_mix_id', 'master_mix'),
('r', 'water_lot_id', 'water_lot'),
]
sql = """
SELECT study_id, sample_id, content, run_name, experiment,
fwd_cycles, rev_cycles, principal_investigator,
et.description as sequencer_description,
lpp.epmotion_robot_id as lepmotion_robot_id,
epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id,
master_mix_id, water_lot_id,
gep.epmotion_robot_id as gepmotion_robot_id,
epmotion_tool_id, kingfisher_robot_id,
extraction_kit_id,
p1.external_id as plate, w1.row_num as row_num,
w1.col_num as col_num,
p2.external_id as primer_composition,
psc.barcode_seq as primer_set_composition,
run_name as run_prefix, sp.sequencer_id as platform_id,
sp.experiment as center_project_name
-- Retrieve sequencing information
FROM qiita.sequencing_process sp
LEFT JOIN qiita.equipment e ON (
sequencer_id = equipment_id)
LEFT JOIN qiita.equipment_type et ON (
et.equipment_type_id = e.equipment_type_id)
LEFT JOIN qiita.sequencing_process_lanes spl USING (
sequencing_process_id)
-- Retrieve pooling information
LEFT JOIN qiita.pool_composition_components pcc1 ON (
pcc1.output_pool_composition_id = spl.pool_composition_id)
LEFT JOIN qiita.pool_composition pccon ON (
pcc1.input_composition_id = pccon.composition_id)
LEFT JOIN qiita.pool_composition_components pcc2 ON (
pccon.pool_composition_id =
pcc2.output_pool_composition_id)
-- Retrieve amplicon library prep information
LEFT JOIN qiita.library_prep_16S_composition lp ON (
pcc2.input_composition_id = lp.composition_id)
LEFT JOIN qiita.composition c1 ON (
lp.composition_id = c1.composition_id)
LEFT JOIN qiita.library_prep_16s_process lpp ON (
lpp.process_id = c1.upstream_process_id)
-- Retrieve the extracted gdna information
LEFT JOIN qiita.gdna_composition gc USING (gdna_composition_id)
LEFT JOIN qiita.composition c2 ON (
gc.composition_id = c2.composition_id)
LEFT JOIN qiita.gdna_extraction_process gep ON (
gep.process_id = c2.upstream_process_id)
-- Retrieve the sample information
LEFT JOIN qiita.sample_composition sc USING (
sample_composition_id)
LEFT JOIN qiita.composition c3 ON (
c3.composition_id = sc.composition_id)
LEFT JOIN qiita.well w1 ON (
w1.container_id = c3.container_id)
LEFT JOIN qiita.plate p1 ON (
w1.plate_id = p1.plate_id)
LEFT JOIN qiita.composition c4 ON (
lp.primer_composition_id = c4.composition_id
)
LEFT JOIN qiita.well w2 ON (
w2.container_id = c4.container_id)
LEFT JOIN qiita.plate p2 ON (
w2.plate_id = p2.plate_id)
LEFT JOIN qiita.primer_composition pc ON (
lp.primer_composition_id = pc.primer_composition_id)
LEFT JOIN qiita.primer_set_composition psc ON (
pc.primer_set_composition_id =
psc.primer_set_composition_id)
FULL JOIN qiita.study_sample USING (sample_id)
WHERE sequencing_process_id = %s
ORDER BY study_id, sample_id, row_num, col_num"""
elif assay == 'Metagenomics':
extra_fields = [
('e', 'gepmotion_robot_id', 'gdata_robot'),
('e', 'epmotion_tool_id', 'epmotion_tool'),
('e', 'kingfisher_robot_id', 'kingfisher_robot'),
('r', 'kappa_hyper_plus_kit_id', 'kappa_hyper_plus_kit'),
('r', 'stub_lot_id', 'stub_lot'),
('r', 'extraction_kit_id', 'extraction_kit'),
('r', 'nwater_lot_id', 'normalization_water_lot'),
]
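            # The shotgun query below follows the same pattern, additionally
            # resolving the i5/i7 index barcodes via the library prep's primer
            # compositions and the normalization water lot via the
            # normalization process.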
sql = """
SELECT study_id, sample_id, content, run_name, experiment,
fwd_cycles, rev_cycles, principal_investigator,
i5.barcode_seq as i5_sequence,
                       i7.barcode_seq as i7_sequence,
et.description as sequencer_description,
gep.epmotion_robot_id as gepmotion_robot_id,
epmotion_tool_id, kingfisher_robot_id,
extraction_kit_id, np.water_lot_id as nwater_lot_id,
kappa_hyper_plus_kit_id, stub_lot_id,
p1.external_id as plate, row_num, col_num,
sp.sequencer_id as platform_id,
sp.experiment as center_project_name
-- Retrieve sequencing information
FROM qiita.sequencing_process sp
LEFT JOIN qiita.equipment e ON (
sequencer_id = equipment_id)
LEFT JOIN qiita.equipment_type et ON (
et.equipment_type_id = e.equipment_type_id)
LEFT JOIN qiita.sequencing_process_lanes USING (
sequencing_process_id)
-- Retrieving pool information
LEFT JOIN qiita.pool_composition_components ON (
output_pool_composition_id = pool_composition_id)
-- Retrieving library prep information
LEFT JOIN qiita.library_prep_shotgun_composition ON (
input_composition_id = composition_id)
LEFT JOIN qiita.primer_composition i5pc ON (
i5_primer_composition_id = i5pc.primer_composition_id)
LEFT JOIN qiita.primer_set_composition i5 ON (
i5pc.primer_set_composition_id =
i5.primer_set_composition_id
)
LEFT JOIN qiita.primer_composition i7pc ON (
i7_primer_composition_id = i7pc.primer_composition_id)
LEFT JOIN qiita.primer_set_composition i7 ON (
i7pc.primer_set_composition_id =
i7.primer_set_composition_id
)
-- Retrieving normalized gdna information
LEFT JOIN qiita.normalized_gdna_composition ngc USING (
normalized_gdna_composition_id)
LEFT JOIN qiita.composition c1 ON (
ngc.composition_id = c1.composition_id)
LEFT JOIN qiita.library_prep_shotgun_process lps ON (
lps.process_id = c1.upstream_process_id)
LEFT JOIN qiita.normalization_process np USING (
normalization_process_id)
-- Retrieving compressed gdna information
LEFT JOIN qiita.compressed_gdna_composition cgc USING (
compressed_gdna_composition_id)
-- Retrieving gdna information
LEFT JOIN qiita.gdna_composition gc USING (gdna_composition_id)
LEFT JOIN qiita.composition c2 ON (
gc.composition_id = c2.composition_id)
LEFT JOIN qiita.gdna_extraction_process gep ON (
gep.process_id = c2.upstream_process_id)
LEFT JOIN qiita.sample_composition sc USING (
sample_composition_id)
LEFT JOIN qiita.composition c3 ON (
c3.composition_id = sc.composition_id)
LEFT JOIN qiita.well w1 ON (
w1.container_id = c3.container_id)
LEFT JOIN qiita.plate p1 ON (
w1.plate_id = p1.plate_id)
FULL JOIN qiita.study_sample USING (sample_id)
WHERE sequencing_process_id = %s
ORDER BY study_id, sample_id, row_num, col_num, i5.barcode_seq
"""
with sql_connection.TRN as TRN:
# to simplify the main queries, let's get all the equipment info
TRN.add("""SELECT equipment_id, external_id, notes, description
FROM qiita.equipment
LEFT JOIN qiita.equipment_type
USING (equipment_type_id)""")
equipment = {}
for row in TRN.execute_fetchindex():
row = dict(row)
eid = row.pop('equipment_id')
equipment[eid] = row
# and the reagents
TRN.add("""SELECT reagent_composition_id, composition_id,
external_lot_id, description
FROM qiita.reagent_composition
LEFT JOIN qiita.reagent_composition_type
USING (reagent_composition_type_id)""")
reagent = {}
for row in TRN.execute_fetchindex():
row = dict(row)
rid = row.pop('reagent_composition_id')
reagent[rid] = row
TRN.add(sql, [self.id])
for result in TRN.execute_fetchindex():
result = dict(result)
study_id = result.pop('study_id')
sid = result.pop('sample_id')
content = result.pop('content')
# format well
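                # row numbers map to letters in bijective base 26:
                # 1 -> 'A', 26 -> 'Z', 27 -> 'AA'; the column number is then
                # appended, giving well names such as 'A1'.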
col = result.pop('col_num')
row = result.pop('row_num')
well = []
while row:
row, rem = divmod(row-1, 26)
well[:0] = container_module.LETTERS[rem]
result['well'] = ''.join(well) + str(col)
# format extra fields list
for t, k, nk in extra_fields:
_id = result.pop(k)
if _id is not None:
if t == 'e':
val = equipment[_id]['external_id']
else:
val = reagent[_id]['external_lot_id']
else:
val = ''
result[nk] = val
# format some final fields
result['platform'] = equipment[
result.pop('platform_id')]['description']
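                # Samples tied to a study are filed under that study; rows
                # without a sample/study id are treated as blanks and merged
                # into every study's prep table below.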
if sid is not None and study_id is not None:
study = Study(study_id)
if study not in data:
data[study] = {}
data[study][content] = result
if assay == 'Metagenomics':
result['run_prefix'] = \
SequencingProcess._bcl_scrub_name(content)
else:
if assay == 'Metagenomics':
result['run_prefix'] = \
SequencingProcess._bcl_scrub_name(content)
blanks[content] = result
# converting from dict to pandas and then to tsv
for study, vals in data.items():
merged = {**vals, **blanks}
df = pd.DataFrame.from_dict(merged, orient='index')
df.sort_index(inplace=True)
cols = sorted(list(df.columns))
sio = StringIO()
df[cols].to_csv(sio, sep='\t', index_label='sample_name')
data[study] = sio.getvalue()
return data
| bsd-3-clause |
soylentdeen/cuddly-weasel | gfVerification/plotSolar.py | 1 | 1472 | import matplotlib.pyplot as pyplot
import Moog960
import MoogTools
import numpy
import glob
correctionFiles = glob.glob('/home/deen/MoogPyData/AbsorptionLines/corrections/*Solar_results.dat')
solarSpectrum = Moog960.ObservedMelody.fromFile(filename='SolarSpectrum.fits')
solarSpectrum.loadData()
arcturusSpectrum = Moog960.ObservedMelody.fromFile(filename='ArcturusSpectrum.fits')
arcturusSpectrum.loadData()
solarSpectrum.selectPhrases(wlRange=[20000, 24000])
arcturusSpectrum.selectPhrases(wlRange=[20000, 24000])
observed, labels = solarSpectrum.perform()
solar = observed[0][0]
observed, labels = arcturusSpectrum.perform()
arcturus = observed[0][0]
wls = []
ycoord = []
left = []
width = []
height = []
counter = 0
for corrections in correctionFiles:
with open(corrections, 'r') as f:
lines = f.readlines()
for line in lines:
wls.append(float(line.split()[0]))
ycoord.append(1.0)
#print counter
#print wls[counter], wls[-1], wls[-2]
left.append(wls[counter])
width.append(wls[-1] - wls[counter])
height.append(1.0)
counter = len(wls)
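# Each corrections file appears to contribute a single bar spanning the
# wavelength range of its lines (first to last), drawn at 50% alpha below.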
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.plot(solar.wl, solar.flux_I, color = 'k')
ax.plot(arcturus.wl-5.5, arcturus.flux_I, color ='g')
#ax.scatter(wls, ycoord, marker='o', s=30, color = 'r')
ax.bar(left, height, width=width, alpha=0.5, color = 'r')
ax.set_xbound(20000, 23000)
ax.set_ybound(0.0, 1.1)
fig.show()
| mit |
mrares/incubator-airflow | airflow/www/views.py | 1 | 97991 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import math
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
    approximate the size of the generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
            if type(args) is not dict:
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
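        # The chart SQL and label are rendered as Jinja templates with the
        # request args and Airflow macros; the sandboxed environment limits
        # what those user-supplied templates can execute.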
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
                    df[df.columns[2]] = df[df.columns[2]].astype(float)  # np.float is just an alias for float
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
                    df = df.sort_values(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
                        df[col] = df[col].astype(float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
                html_dict[template_field] = (
                    "<pre><code>" + str(content) + "</code></pre>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
session = Session()
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
else:
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
try:
ti.task = dag.get_task(ti.task_id)
logs = handler.read(ti)
except AttributeError as e:
                logs = ["Task log handler {} does not support reading logs.\n{}\n"
                        .format(task_log_reader, e)]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
            .format(
                "- This task instance already ran and had its state changed manually (e.g. cleared in the UI)<br/>"
                if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors.celery_executor import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dags, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = 0
for dag in dags:
count += dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = []
for dag in dags:
tis.extend(dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True))
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
descendants = request.args.get('descendants') == "true"
dags = [dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)]
if descendants:
dags.extend(dag.descendants(
dagbag, task_ids=[task_id], include_downstream=downstream,
include_upstream=upstream, recursive=recursive))
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dags, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
descendants = request.args.get('descendants') == "true"
dag = dagbag.get_dag(dag_id)
dags = [dag]
if descendants:
dags.extend(dag.descendants(dagbag, task_ids=[task_id], recursive=True))
execution_date = dateutil.parser.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dags, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = dateutil.parser.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date,
DR.execution_date >= min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
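        # node_limit is a per-root budget, so the total number of traced nodes
        # stays around 5000 no matter how many root tasks the DAG has.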
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = datetime.utcnow() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.utcnow().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
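        # The cumulative chart adds time spent in failed attempts (TaskFail
        # durations) on top of each task instance's recorded duration.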
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
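        # A jQuery 'chartload' trigger is spliced into the generated script,
        # presumably so the page's JS can react once the chart has rendered.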
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
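        # "Landing time" here is the task's end time minus the schedule tick
        # following its execution_date, i.e. how long after the run became due
        # the task actually finished.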
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
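        # The 'is_paused' request arg carries the DAG's state *before* the
        # toggle, so the stored flag is set to the opposite of what was sent.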
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.utcnow()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.utcnow().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.utcnow()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifies the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
            'A dictionary of {"key": "values",} that defines the default '
            'values of the templated fields (parameters). '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.utcnow()
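# Maps the user-facing chart_type choices above to the chart model names
# expected by the JavaScript charting front-end (the targets appear to be
# nvd3 chart models).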
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
try:
return getattr(model, name)
except AirflowException:
return Markup('<span class="label label-danger">Invalid</span>')
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.utcnow()
else:
dr.end_date = datetime.utcnow()
session.commit()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
"""
As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.delete_task_instances(ids)
else:
super(TaskInstanceModelView, self).action_delete(ids)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
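            # each selected id encodes the composite key as
            # 'task_id,dag_id,execution_date', which is split below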
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@provide_session
def delete_task_instances(self, ids, session=None):
try:
TI = models.TaskInstance
count = 0
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
count += session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).delete()
session.commit()
flash("{count} task instances were deleted".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to delete', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: these form elements get rendered
    # and their results are stored in the extra field as JSON. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
yuxiang-zhou/menpofit | menpofit/aps/base.py | 2 | 39189 | from __future__ import division
import warnings
import numpy as np
from scipy.stats import multivariate_normal
from menpo.base import name_of_callable
from menpo.feature import no_op
from menpo.visualize import print_dynamic, print_progress
from menpo.model import GMRFModel
from menpo.shape import (DirectedGraph, UndirectedGraph, Tree, PointTree,
PointDirectedGraph, PointUndirectedGraph)
from menpofit import checks
from menpofit.base import batch
from menpofit.modelinstance import OrthoPDM
from menpofit.builder import (compute_features, scale_images, align_shapes,
rescale_images_to_reference_shape,
extract_patches, MenpoFitBuilderWarning,
compute_reference_shape)
class GenerativeAPS(object):
r"""
Class for training a multi-scale Generative Active Pictorial Structures
model. Please see the references for a basic list of relevant papers.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that will be used to train the AAM. If ``None`` and
the images only have a single landmark group, then that is the one
that will be used. Note that all the training images need to have the
specified landmark group.
appearance_graph : `list` of graphs or a single graph or ``None``, optional
The graph to be used for the appearance `menpo.model.GMRFModel` training.
        It must be a `menpo.shape.UndirectedGraph`. If ``None``, then a
        `menpo.model.PCAModel` would be used instead, but note that this case
        is not implemented yet and currently raises ``NotImplementedError``.
shape_graph : `list` of graphs or a single graph or ``None``, optional
The graph to be used for the shape `menpo.model.GMRFModel` training. It
must be a `menpo.shape.UndirectedGraph`. If ``None``, then the shape
model is built using `menpo.model.PCAModel`.
deformation_graph : `list` of graphs or a single graph or ``None``, optional
The graph to be used for the deformation `menpo.model.GMRFModel`
training. It must be either a `menpo.shape.DirectedGraph` or a
`menpo.shape.Tree`. If ``None``, then the minimum spanning tree of the
data is computed.
holistic_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the training images. Note
that the features are extracted before warping the images to the
reference shape. If `list`, then it must define a feature function per
scale. Please refer to `menpo.feature` for a list of potential features.
reference_shape : `menpo.shape.PointCloud` or ``None``, optional
The reference shape that will be used for building the APS. The purpose
of the reference shape is to normalise the size of the training images.
The normalization is performed by rescaling all the training images
so that the scale of their ground truth shapes matches the scale of
the reference shape. Note that the reference shape is rescaled with
respect to the `diagonal` before performing the normalisation. If
``None``, then the mean shape will be used.
diagonal : `int` or ``None``, optional
This parameter is used to rescale the reference shape so that the
diagonal of its bounding box matches the provided value. In other
words, this parameter controls the size of the model at the highest
scale. If ``None``, then the reference shape does not get rescaled.
scales : `float` or `tuple` of `float`, optional
The scale value of each scale. They must provided in ascending order,
i.e. from lowest to highest scale. If `float`, then a single scale is
assumed.
patch_shape : (`int`, `int`) or `list` of (`int`, `int`), optional
The shape of the patches to be extracted. If a `list` is provided,
then it defines a patch shape per scale.
patch_normalisation : `list` of `callable` or a single `callable`, optional
The normalisation function to be applied on the extracted patches. If
`list`, then it must have length equal to the number of scales. If a
single patch normalization `callable`, then this is the one applied to
all scales.
use_procrustes : `bool`, optional
If ``True``, then Generalized Procrustes Alignment is applied before
building the deformation model.
precision_dtype : `numpy.dtype`, optional
The data type of the appearance GMRF's precision matrix. For example, it
can be set to `numpy.float32` for single precision or to `numpy.float64`
for double precision. Even though the precision matrix is stored as a
`scipy.sparse` matrix, this parameter has a big impact on the amount of
memory required by the model.
max_shape_components : `int`, `float`, `list` of those or ``None``, optional
The number of shape components to keep. If `int`, then it sets the exact
number of components. If `float`, then it defines the variance
percentage that will be kept. If `list`, then it should
define a value per scale. If a single number, then this will be
applied to all scales. If ``None``, then all the components are kept.
Note that the unused components will be permanently trimmed.
n_appearance_components : `list` of `int` or `int` or ``None``, optional
The number of appearance components used for building the appearance
`menpo.shape.GMRFModel`. If `list`, then it must have length equal to
the number of scales. If a single `int`, then this is the one applied
to all scales. If ``None``, the covariance matrix of each edge is
inverted using `np.linalg.inv`. If `int`, it is inverted using
truncated SVD using the specified number of components.
can_be_incremented : `bool`, optional
In case you intend to incrementally update the model in the future,
then this flag must be set to ``True`` from the first place. Note
that if ``True``, the appearance and deformation `menpo.shape.GMRFModel`
models will occupy double memory.
verbose : `bool`, optional
If ``True``, then the progress of building the APS will be printed.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
value. If ``None``, then the training is performed directly on the
all the images.
References
----------
.. [1] E. Antonakos, J. Alabort-i-Medina, and S. Zafeiriou, "Active
Pictorial Structures", Proceedings of the IEEE Conference on Computer
Vision and Pattern Recognition (CVPR), Boston, MA, USA, pp. 1872-1882,
June 2015.
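    Examples
    --------
    A minimal training sketch; the image directory and the ``'PTS'`` landmark
    group below are illustrative only:
    >>> import menpo.io as mio
    >>> training_images = list(mio.import_images('/path/to/training_images/'))
    >>> aps = GenerativeAPS(training_images, group='PTS', diagonal=150,
    ...                     scales=(0.5, 1.0), verbose=True)
    >>> mean_instance = aps.instance()  # instance at the mean shape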
"""
def __init__(self, images, group=None, appearance_graph=None,
shape_graph=None, deformation_graph=None,
holistic_features=no_op, reference_shape=None, diagonal=None,
scales=(0.5, 1.0), patch_shape=(17, 17),
patch_normalisation=no_op, use_procrustes=True,
precision_dtype=np.float32, max_shape_components=None,
n_appearance_components=None, can_be_incremented=False,
verbose=False, batch_size=None):
# Check parameters
checks.check_diagonal(diagonal)
scales = checks.check_scales(scales)
n_scales = len(scales)
holistic_features = checks.check_callable(holistic_features, n_scales)
patch_shape = checks.check_patch_shape(patch_shape, n_scales)
patch_normalisation = checks.check_callable(patch_normalisation,
n_scales)
max_shape_components = checks.check_max_components(
max_shape_components, n_scales, 'max_shape_components')
n_appearance_components = checks.check_max_components(
n_appearance_components, n_scales, 'n_appearance_components')
# Assign attributes
self.diagonal = diagonal
self.scales = scales
self.holistic_features = holistic_features
self.patch_shape = patch_shape
self.patch_normalisation = patch_normalisation
self.reference_shape = reference_shape
self.use_procrustes = use_procrustes
self.is_incremental = can_be_incremented
self.precision_dtype = precision_dtype
self.max_shape_components = max_shape_components
self.n_appearance_components = n_appearance_components
# Check provided graphs
self.appearance_graph = checks.check_graph(
appearance_graph, UndirectedGraph, 'appearance_graph', n_scales)
self.shape_graph = checks.check_graph(shape_graph, UndirectedGraph,
'shape_graph', n_scales)
self.deformation_graph = checks.check_graph(
deformation_graph, [DirectedGraph, Tree], 'deformation_graph',
n_scales)
# Initialize models' lists
self.shape_models = []
self.appearance_models = []
self.deformation_models = []
# Train APS
self._train(images, increment=False, group=group, batch_size=batch_size,
verbose=verbose)
def _train(self, images, increment=False, group=None, batch_size=None,
verbose=False):
# If batch_size is not None, then we may have a generator, else we
# assume we have a list.
if batch_size is not None:
# Create a generator of fixed sized batches. Will still work even
# on an infinite list.
image_batches = batch(images, batch_size)
else:
image_batches = [list(images)]
for k, image_batch in enumerate(image_batches):
if k == 0:
if self.reference_shape is None:
# If no reference shape was given, use the mean of the first
# batch
if batch_size is not None:
warnings.warn('No reference shape was provided. The '
'mean of the first batch will be the '
'reference shape. If the batch mean is '
'not representative of the true mean, '
'this may cause issues.',
MenpoFitBuilderWarning)
self.reference_shape = compute_reference_shape(
[i.landmarks[group].lms for i in image_batch],
self.diagonal, verbose=verbose)
# After the first batch, we are incrementing the model
if k > 0:
increment = True
if verbose:
print('Computing batch {}'.format(k))
# Train each batch
self._train_batch(
image_batch, increment=increment, group=group, verbose=verbose)
def _train_batch(self, image_batch, increment=False, group=None,
verbose=False):
# Rescale to existing reference shape
image_batch = rescale_images_to_reference_shape(
image_batch, group, self.reference_shape, verbose=verbose)
# If the deformation graph was not provided (None given), then compute
# the MST
if None in self.deformation_graph:
graph_shapes = [i.landmarks[group].lms for i in image_batch]
deformation_mst = _compute_minimum_spanning_tree(
graph_shapes, root_vertex=0, prefix='- ', verbose=verbose)
self.deformation_graph = [deformation_mst if g is None else g
for g in self.deformation_graph]
# Build models at each scale
if verbose:
print_dynamic('- Building models\n')
feature_images = []
# for each scale (low --> high)
for j in range(self.n_scales):
if verbose:
if len(self.scales) > 1:
scale_prefix = ' - Scale {}: '.format(j)
else:
scale_prefix = ' - '
else:
scale_prefix = None
# Handle holistic features
if j == 0 and self.holistic_features[j] == no_op:
# Saves a lot of memory
feature_images = image_batch
elif (j == 0 or self.holistic_features[j] is not
self.holistic_features[j - 1]):
# Compute features only if this is the first pass through
# the loop or the features at this scale are different from
# the features at the previous scale
feature_images = compute_features(image_batch,
self.holistic_features[j],
prefix=scale_prefix,
verbose=verbose)
# handle scales
if self.scales[j] != 1:
# Scale feature images only if scale is different than 1
scaled_images = scale_images(feature_images, self.scales[j],
prefix=scale_prefix,
verbose=verbose)
else:
scaled_images = feature_images
# Extract potentially rescaled shapes
scale_shapes = [i.landmarks[group].lms for i in scaled_images]
# Apply procrustes to align the shapes
aligned_shapes = align_shapes(scale_shapes)
# Build the shape model using the aligned shapes
if verbose:
print_dynamic('{}Building shape model'.format(scale_prefix))
if not increment:
self.shape_models.append(self._build_shape_model(
aligned_shapes, self.shape_graph[j],
self.max_shape_components[j], verbose=verbose))
else:
self.shape_models[j].increment(aligned_shapes, verbose=verbose)
# Build the deformation model
if verbose:
print_dynamic('{}Building deformation model'.format(
scale_prefix))
if self.use_procrustes:
deformation_shapes = aligned_shapes
else:
deformation_shapes = scale_shapes
if not increment:
self.deformation_models.append(self._build_deformation_model(
deformation_shapes, self.deformation_graph[j],
verbose=verbose))
else:
self.deformation_models[j].increment(deformation_shapes,
verbose=verbose)
# Obtain warped images
warped_images = self._warp_images(scaled_images, scale_shapes,
j, scale_prefix, verbose)
# Build the appearance model
if verbose:
print_dynamic('{}Building appearance model'.format(
scale_prefix))
if not increment:
self.appearance_models.append(self._build_appearance_model(
warped_images, self.appearance_graph[j],
self.n_appearance_components[j], verbose=verbose))
else:
self._increment_appearance_model(
warped_images, self.appearance_graph[j],
self.appearance_models[j], verbose=verbose)
if verbose:
print_dynamic('{}Done\n'.format(scale_prefix))
def increment(self, images, group=None, batch_size=None, verbose=False):
r"""
Method that incrementally updates the APS model with a new batch of
training images.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that will be used to train the APS. If ``None``
and the images only have a single landmark group, then that is the
one that will be used. Note that all the training images need to
have the specified landmark group.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
value. If ``None``, then the training is performed directly on the
all the images.
verbose : `bool`, optional
If ``True``, then the progress of building the APS will be printed.
"""
return self._train(images, increment=True, group=group,
verbose=verbose, batch_size=batch_size)
def _build_shape_model(self, shapes, shape_graph, max_shape_components,
verbose=False):
# if the provided graph is None, then apply PCA, else use the GMRF
if shape_graph is not None:
pca_model = GMRFModel(
shapes, shape_graph, mode='concatenation', n_components=None,
dtype=np.float64, sparse=False, incremental=self.is_incremental,
verbose=verbose).principal_components_analysis()
return OrthoPDM(pca_model, max_n_components=max_shape_components)
else:
return OrthoPDM(shapes, max_n_components=max_shape_components)
def _build_deformation_model(self, shapes, deformation_graph,
verbose=False):
return GMRFModel(shapes, deformation_graph, mode='subtraction',
n_components=None, dtype=np.float64, sparse=False,
incremental=self.is_incremental, verbose=verbose)
def _build_appearance_model(self, images, appearance_graph,
n_appearance_components, verbose=False):
if appearance_graph is not None:
return GMRFModel(images, appearance_graph, mode='concatenation',
n_components=n_appearance_components,
dtype=self.precision_dtype, sparse=True,
incremental=self.is_incremental, verbose=verbose)
else:
raise NotImplementedError('The full appearance model is not '
'implemented yet.')
def _increment_appearance_model(self, images, appearance_graph,
appearance_model, verbose=False):
if appearance_graph is not None:
appearance_model.increment(images, verbose=verbose)
else:
raise NotImplementedError('The full appearance model is not '
'implemented yet.')
def _warp_images(self, images, shapes, scale_index, prefix, verbose):
return extract_patches(
images, shapes, self.patch_shape[scale_index],
normalise_function=self.patch_normalisation[scale_index],
prefix=prefix, verbose=verbose)
@property
def n_scales(self):
"""
Returns the number of scales.
:type: `int`
"""
return len(self.scales)
@property
def _str_title(self):
r"""
Returns a string containing name of the model.
:type: `str`
"""
return 'Generative Active Pictorial Structures'
def instance(self, shape_weights=None, scale_index=-1, as_graph=False):
r"""
Generates an instance of the shape model.
Parameters
----------
shape_weights : ``(n_weights,)`` `ndarray` or `list` or ``None``, optional
The weights of the shape model that will be used to create a novel
shape instance. If ``None``, the weights are assumed to be zero,
thus the mean shape is used.
scale_index : `int`, optional
The scale to be used.
as_graph : `bool`, optional
If ``True``, then the instance will be returned as a
`menpo.shape.PointTree` or a `menpo.shape.PointDirectedGraph`,
depending on the type of the deformation graph.
"""
if shape_weights is None:
shape_weights = [0]
sm = self.shape_models[scale_index].model
shape_instance = sm.instance(shape_weights, normalized_weights=True)
if as_graph:
if isinstance(self.deformation_graph[scale_index], Tree):
shape_instance = PointTree(
shape_instance.points,
self.deformation_graph[scale_index].adjacency_matrix,
self.deformation_graph[scale_index].root_vertex)
else:
shape_instance = PointDirectedGraph(
shape_instance.points,
self.deformation_graph[scale_index].adjacency_matrix)
return shape_instance
def random_instance(self, scale_index=-1, as_graph=False):
r"""
Generates a random instance of the APS.
Parameters
----------
scale_index : `int`, optional
The scale to be used.
as_graph : `bool`, optional
If ``True``, then the instance will be returned as a
`menpo.shape.PointTree` or a `menpo.shape.PointDirectedGraph`,
depending on the type of the deformation graph.
"""
shape_weights = np.random.randn(
self.shape_models[scale_index].n_active_components)
return self.instance(shape_weights, scale_index=scale_index,
as_graph=as_graph)
def view_shape_models_widget(self, n_parameters=5,
parameters_bounds=(-3.0, 3.0),
mode='multiple', figure_size=(10, 8)):
r"""
Visualizes the shape models of the APS object using an interactive
widget.
Parameters
----------
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of shape principal components to be used for the
parameters sliders. If `int`, then the number of sliders per
scale is the minimum between `n_parameters` and the number of
active components per scale. If `list` of `int`, then a number of
sliders is defined per scale. If ``None``, all the active
components per scale will have a slider.
parameters_bounds : ``(float, float)``, optional
The minimum and maximum bounds, in std units, for the sliders.
mode : {``single``, ``multiple``}, optional
If ``'single'``, only a single slider is constructed along with a
drop down menu. If ``'multiple'``, a slider is constructed for
each parameter.
figure_size : (`int`, `int`), optional
The size of the rendered figure.
"""
try:
from menpowidgets import visualize_shape_model
visualize_shape_model(
[sm.model for sm in self.shape_models],
n_parameters=n_parameters, parameters_bounds=parameters_bounds,
figure_size=figure_size, mode=mode)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def view_shape_graph_widget(self, scale_index=-1, figure_size=(10, 8)):
r"""
Visualize the shape graph using an interactive widget.
Parameters
----------
scale_index : `int`, optional
The scale to be used.
figure_size : (`int`, `int`), optional
The size of the rendered figure.
Raises
------
ValueError
Scale level {scale_index} uses a PCA shape model, so there is no
graph
"""
if self.shape_graph[scale_index] is not None:
PointUndirectedGraph(
self.shape_models[scale_index].model.mean().points,
self.shape_graph[scale_index].adjacency_matrix).view_widget(
figure_size=figure_size)
else:
raise ValueError("Scale level {} uses a PCA shape model, so there "
"is no graph".format(scale_index))
def view_deformation_graph_widget(self, scale_index=-1,
figure_size=(10, 8)):
r"""
Visualize the deformation graph using an interactive widget.
Parameters
----------
scale_index : `int`, optional
The scale to be used.
figure_size : (`int`, `int`), optional
The size of the rendered figure.
"""
if isinstance(self.deformation_graph[scale_index], Tree):
dg = PointTree(self.shape_models[scale_index].model.mean().points,
self.deformation_graph[scale_index].adjacency_matrix,
self.deformation_graph[scale_index].root_vertex)
else:
dg = PointDirectedGraph(
self.shape_models[scale_index].model.mean().points,
self.deformation_graph[scale_index].adjacency_matrix)
dg.view_widget(figure_size=figure_size)
def view_appearance_graph_widget(self, scale_index=-1, figure_size=(10, 8)):
r"""
Visualize the appearance graph using an interactive widget.
Parameters
----------
scale_index : `int`, optional
The scale to be used.
figure_size : (`int`, `int`), optional
The size of the rendered figure.
Raises
------
ValueError
Scale level {scale_index} uses a PCA appearance model, so there
is no graph
"""
if self.appearance_graph[scale_index] is not None:
PointUndirectedGraph(
self.shape_models[scale_index].model.mean().points,
self.appearance_graph[scale_index].adjacency_matrix).\
view_widget(figure_size=figure_size)
else:
raise ValueError("Scale level {} uses a PCA appearance model, "
"so there is no graph".format(scale_index))
def view_deformation_model(self, scale_index=-1, n_std=2,
render_colour_bar=False, colour_map='jet',
image_view=True, figure_id=None,
new_figure=False, render_graph_lines=True,
graph_line_colour='b', graph_line_style='-',
graph_line_width=1., ellipse_line_colour='r',
ellipse_line_style='-', ellipse_line_width=1.,
render_markers=True, marker_style='o',
marker_size=5, marker_face_colour='k',
marker_edge_colour='k', marker_edge_width=1.,
render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal',
axes_font_weight='normal', crop_proportion=0.1,
figure_size=(10, 8)):
r"""
Visualize the deformation model by plotting a Gaussian ellipsis per
graph edge.
Parameters
----------
scale_index : `int`, optional
The scale to be used.
n_std : `float`, optional
This defines the size of the ellipses in terms of number of standard
deviations.
render_colour_bar : `bool`, optional
If ``True``, then the ellipses will be coloured based on their
normalized standard deviations and a colour bar will also appear on
the side. If ``False``, then all the ellipses will have the same
colour.
colour_map : `str`, optional
A valid Matplotlib colour map. For more info, please refer to
`matplotlib.cm`.
image_view : `bool`, optional
If ``True`` the ellipses will be rendered in the image coordinates
system.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_graph_lines : `bool`, optional
Defines whether to plot the graph's edges.
graph_line_colour : See Below, optional
The colour of the lines of the graph's edges.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
graph_line_style : ``{-, --, -., :}``, optional
The style of the lines of the graph's edges.
graph_line_width : `float`, optional
The width of the lines of the graph's edges.
ellipse_line_colour : See Below, optional
The colour of the lines of the ellipses.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
ellipse_line_style : ``{-, --, -., :}``, optional
The style of the lines of the ellipses.
ellipse_line_width : `float`, optional
The width of the lines of the ellipses.
render_markers : `bool`, optional
If ``True``, the centers of the ellipses will be rendered.
marker_style : See Below, optional
The style of the centers of the ellipses. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the centers of the ellipses in points.
marker_face_colour : See Below, optional
The face (filling) colour of the centers of the ellipses.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the centers of the ellipses.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The edge width of the centers of the ellipses.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
crop_proportion : `float`, optional
The proportion to be left around the centers' pointcloud.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
"""
from menpo.visualize import plot_gaussian_ellipses
mean_shape = self.shape_models[scale_index].model.mean().points
deformation_graph = self.deformation_graph[scale_index]
# get covariance matrices
covariances = []
means = []
for e in range(deformation_graph.n_edges):
# find vertices
parent = deformation_graph.edges[e, 0]
child = deformation_graph.edges[e, 1]
# relative location mean
means.append(mean_shape[child, :])
# relative location cov
s1 = -self.deformation_models[scale_index].precision[2 * child,
2 * parent]
s2 = -self.deformation_models[scale_index].precision[2 * child + 1,
2 * parent + 1]
s3 = -self.deformation_models[scale_index].precision[2 * child,
2 * parent + 1]
covariances.append(np.linalg.inv(np.array([[s1, s3], [s3, s2]])))
# plot deformation graph
if isinstance(deformation_graph, Tree):
renderer = PointTree(
mean_shape,
deformation_graph.adjacency_matrix,
deformation_graph.root_vertex).view(
figure_id=figure_id, new_figure=new_figure,
image_view=image_view, render_lines=render_graph_lines,
line_colour=graph_line_colour, line_style=graph_line_style,
line_width=graph_line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_axes=render_axes,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size)
else:
renderer = PointDirectedGraph(
mean_shape,
deformation_graph.adjacency_matrix).view(
figure_id=figure_id, new_figure=new_figure,
image_view=image_view, render_lines=render_graph_lines,
line_colour=graph_line_colour, line_style=graph_line_style,
line_width=graph_line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_axes=render_axes,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size)
# plot ellipses
renderer = plot_gaussian_ellipses(
covariances, means, n_std=n_std,
render_colour_bar=render_colour_bar,
colour_bar_label='Normalized Standard Deviation',
colour_map=colour_map, figure_id=renderer.figure_id,
new_figure=False, image_view=image_view,
line_colour=ellipse_line_colour, line_style=ellipse_line_style,
line_width=ellipse_line_width, render_markers=render_markers,
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
marker_edge_width=marker_edge_width, marker_size=marker_size,
marker_style=marker_style, render_axes=render_axes,
axes_font_name=axes_font_name, axes_font_size=axes_font_size,
axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
crop_proportion=crop_proportion, figure_size=figure_size)
return renderer
def __str__(self):
return _aps_str(self)
def _compute_minimum_spanning_tree(shapes, root_vertex=0, prefix='',
verbose=False):
# initialize weights matrix
n_vertices = shapes[0].n_points
weights = np.zeros((n_vertices, n_vertices))
# print progress if requested
range1 = range(n_vertices-1)
if verbose:
range1 = print_progress(
range1, end_with_newline=False,
            prefix='{}Deformation graph - Computing complete graph\'s '
'weights'.format(prefix))
# compute weights
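    # the weight of edge (i, j) is the total negative log-likelihood of the
    # observed relative locations x_i - x_j under the Gaussian fitted to them,
    #     w_ij = - sum_m log N(x_i^m - x_j^m | mu_ij, Sigma_ij),
    # so point pairs whose relative position is well modelled get small
    # weights and are favoured by the minimum spanning tree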
for i in range1:
for j in range(i+1, n_vertices, 1):
# create data matrix of edge
diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes]
diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes]
coords = np.array([diffs_x, diffs_y])
# compute mean and covariance
m = np.mean(coords, axis=1)
c = np.cov(coords)
# get weight
for im in range(len(shapes)):
weights[i, j] += -np.log(multivariate_normal.pdf(coords[:, im],
mean=m, cov=c))
weights[j, i] = weights[i, j]
# create undirected graph
complete_graph = UndirectedGraph(weights)
if verbose:
        print_dynamic('{}Deformation graph - Minimum spanning tree '
'computed.\n'.format(prefix))
# compute minimum spanning graph
return complete_graph.minimum_spanning_tree(root_vertex)
def _aps_str(aps):
if aps.diagonal is not None:
diagonal = aps.diagonal
else:
y, x = aps.reference_shape.range()
diagonal = np.sqrt(x ** 2 + y ** 2)
# Compute scale info strings
scales_info = []
lvl_str_tmplt = r""" - Scale {}
- Holistic feature: {}
- Patch shape: {}
- Appearance model class: {}
- {}
- {} features per point ({} in total)
- {}
- Shape model class: {}
- {}
- {} shape components
- {} similarity transform parameters
- Deformation model class: {}
- {}"""
for k, s in enumerate(aps.scales):
comp_str = "No SVD used"
if aps.appearance_models[k].n_components is not None:
comp_str = "{} SVD components".format(aps.appearance_models[k].n_components)
shape_model_str = "Trained using PCA"
if aps.shape_graph[k] is not None:
shape_model_str = "Trained using GMRF: {}".format(aps.shape_graph[k].__str__())
scales_info.append(lvl_str_tmplt.format(
s, name_of_callable(aps.holistic_features[k]),
aps.patch_shape[k],
name_of_callable(aps.appearance_models[k]),
aps.appearance_models[k].graph.__str__(),
aps.appearance_models[k].n_features_per_vertex,
aps.appearance_models[k].n_features,
comp_str,
name_of_callable(aps.shape_models[k]),
shape_model_str,
aps.shape_models[k].model.n_components,
aps.shape_models[k].n_global_parameters,
name_of_callable(aps.deformation_models[k]),
aps.deformation_models[k].graph.__str__()))
scales_info = '\n'.join(scales_info)
cls_str = r"""{class_title}
- Images scaled to diagonal: {diagonal:.2f}
- Scales: {scales}
{scales_info}
""".format(class_title=aps._str_title,
diagonal=diagonal,
scales=aps.scales,
scales_info=scales_info)
return cls_str
| bsd-3-clause |
MTgeophysics/mtpy | tests/modeling/__init__.py | 1 | 2712 | from __future__ import print_function
import shutil
from difflib import unified_diff
import matplotlib
import os
import sys
from matplotlib import pyplot as plt
if os.name == "posix" and 'DISPLAY' not in os.environ:
print("MATPLOTLIB: No Display found, using non-interactive svg backend", file=sys.stderr)
matplotlib.use('svg')
import matplotlib.pyplot as plt
else:
# matplotlib.use('svg')
import matplotlib.pyplot as plt
plt.ion()
def diff_files(after, before, ignores=None):
"""
    compare two files using diff
    :param after: path to the file produced by the run under test
    :param before: path to the baseline file to compare against
    :param ignores: optional list of substrings; lines containing any of them
        are excluded from the comparison
    :return: tuple (is_identical, msg), where is_identical is True when no
        differences were found and msg is a human-readable report
"""
with open(before) as f2p:
before_lines = f2p.readlines()
with open(after) as f1p:
after_lines = f1p.readlines()
before_lines = [line.strip() for line in before_lines]
after_lines = [line.strip() for line in after_lines]
if ignores:
for ignored_term in ignores:
before_lines = [line for line in before_lines if ignored_term not in line]
            after_lines = [line for line in after_lines if ignored_term not in line]
msg = "Comparing {} and {}:\n".format(before, after)
lines = [line for line in unified_diff(
before_lines,
after_lines,
fromfile="baseline ({})".format(before),
tofile="test ({})".format(after),
n=0)]
if lines:
msg += " Found differences:\n\t" + "\n\t".join(lines)
is_identical = False
else:
msg += " NO differences found."
is_identical = True
return is_identical, msg
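# Example usage of diff_files (file paths are illustrative):
#   identical, report = diff_files("tmp/output.dat", "tests/baseline.dat",
#                                  ignores=["generated on"])
#   print(report)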
def _clean_recreate(adir):
if os.path.exists(adir):
# clear dir if it already exist
shutil.rmtree(adir)
os.mkdir(adir)
# def show_patcher(show_func):
# """
# patch the plt.show() if interactive is enabled to display and then close the plot after 1 second
# so plt.show() will not block the script and the figure is still visible to the user
# :param show_func:
# :return:
# """
#
# def new_show_func(*args, **kwargs):
# stuff = show_func(*args, **kwargs)
# # wait 1 second for the image to show on screen
# figManager = plt.gcf()
# if figManager is not None:
# canvas = figManager.canvas
# # if canvas.figure.stale:
# # canvas.draw()
# # show(block=False)
# try:
# canvas.start_event_loop(1) # wait time = 1
# except NotImplementedError:
# pass
# finally:
# pass
# plt.close()
# return stuff
#
# return new_show_func if plt.isinteractive() else show_func
| gpl-3.0 |
henridwyer/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e368.py | 2 | 5841 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1024,
# random_window=64,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=64,
subsample_target=4,
include_diff=False,
include_power=True,
# clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
100: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BLSTMLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
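    # Sketch of the stack above: a 40-unit bidirectional LSTM over the input
    # sequence, a stride-4 1-D convolution (20 filters) that downsamples the
    # time axis by 4 to match subsample_target, an 80-unit bidirectional
    # LSTM, and a dense softplus layer with one output per target appliance.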
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=100000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
JWDebelius/American-Gut | tests/test_util.py | 1 | 13782 | #!/usr/bin/env python
import os
import pandas as pd
from StringIO import StringIO
from unittest import TestCase, main
from numpy import array, nan
from biom import Table
from pandas.util.testing import assert_frame_equal
from americangut.util import (
slice_mapping_file, parse_mapping_file,
verify_subset, concatenate_files, trim_fasta, count_samples,
count_seqs, count_unique_participants, clean_and_reformat_mapping,
add_alpha_diversity, get_single_id_lists
)
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Daniel McDonald", "Adam Robbins-Pianka"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
class UtilTests(TestCase):
def test_count_samples(self):
test_mapping = ["#SampleID\tfoo\tbar",
"A\t1\t2",
"B\t1\t3",
"C\t2\t4",
"D\t3\t5",
"E\t2\t6"]
obs = count_samples(iter(test_mapping))
exp = 5
self.assertEqual(obs, exp)
        obs = count_samples(iter(test_mapping), criteria={'foo': '2'})
        exp = 2
        self.assertEqual(obs, exp)
def test_count_seqs(self):
test_seqs = [">a b",
"aattggcc",
">b.xyz stuff",
"asdasd",
">c",
"qweasd",
">d.foo",
"qweasdasd"]
obs = count_seqs(iter(test_seqs))
exp = 4
self.assertEqual(obs, exp)
obs = count_seqs(iter(test_seqs), subset=['b', 'c', 'foo'])
exp = 2
self.assertEqual(obs, exp)
def test_count_unique_participants(self):
test_mapping = ["#SampleID\tfoo\tbar\tHOST_SUBJECT_ID",
"A\t1\t2\tx",
"B\t1\t3\tx",
"C\t2\t4\ty",
"D\t3\t5\tz",
"E\t2\t6\tw"]
obs = count_unique_participants(iter(test_mapping))
exp = 4
self.assertEqual(obs, exp)
obs = count_unique_participants(iter(test_mapping),
criteria={'foo': '1'})
exp = 1
self.assertEqual(obs, exp)
obs = count_unique_participants(iter(test_mapping),
criteria={'foo': '2'})
exp = 2
self.assertEqual(obs, exp)
def test_verify_subset(self):
metadata = [('a','other stuff\tfoo'), ('b', 'asdasdasd'),
('c','123123123')]
table = Table(array([[1,2,3],[4,5,6]]),
['x', 'y'],
['a', 'b', 'c'])
self.assertTrue(verify_subset(table, metadata))
table = Table(array([[1,2],[3,4]]),
['x','y'],
['a','b'])
self.assertTrue(verify_subset(table, metadata))
table = Table(array([[1,2,3],[4,5,6]]),
['x','y'],
['a','b','x'])
self.assertFalse(verify_subset(table, metadata))
def test_slice_mapping_file(self):
header, metadata = parse_mapping_file(StringIO(test_mapping))
table = Table(array([[1,2],[4,5]]),
['x','y'],
['a','c'])
exp = ["a\t1\t123123", "c\tpoop\tdoesn't matter"]
obs = slice_mapping_file(table, metadata)
self.assertEqual(obs,exp)
def test_parse_mapping_file(self):
exp = ("#SampleIDs\tfoo\tbar", [['a','1\t123123'],
['b','yy\txxx'],
['c',"poop\tdoesn't matter"]])
obs = parse_mapping_file(StringIO(test_mapping))
self.assertEqual(obs, exp)
def test_concatenate_files(self):
expected_output = concat_test_input + concat_test_input
input_files = [StringIO(concat_test_input),
StringIO(concat_test_input)]
output_file = StringIO()
concatenate_files(input_files, output_file)
output_file.seek(0)
self.assertEqual(expected_output, output_file.read())
# try again with a tiny chunk size
input_files = [StringIO(concat_test_input),
StringIO(concat_test_input)]
output_file = StringIO()
concatenate_files(input_files, output_file, 2)
output_file.seek(0)
self.assertEqual(expected_output, output_file.read())
def test_trim_fasta(self):
infasta = StringIO(test_fasta)
# Trim length 10
expected = (">seq1\n"
"0123456789\n"
">seq2\n"
"0123456789\n"
">seq3\n"
"012345\n")
outfasta = StringIO()
trim_fasta(infasta, outfasta, 10)
outfasta.seek(0)
self.assertEqual(expected, outfasta.read())
def test_clean_and_reformat_mapping(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_clean_and_reformat_mapping_nopgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_clean_and_reformat_mapping_allpgp(self):
"""Exercise the reformat mapping code, verify expected results"""
out = StringIO()
reformat_mapping_testdata.seek(0)
clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
'test')
out.seek(0)
# verify the resulting header structure
test_mapping = [l.strip().split('\t') for l in out]
test_header = test_mapping[0]
self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
'TITLE_ACRONYM', 'TITLE_BODY_SITE',
'HMP_SITE'])
self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
'UBERON_mucosa_of_tongue', '5',
'ORAL', 'test', 'test-ORAL',
'ORAL'])
self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
'UBERON:FECES', '10',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
'UBERON_FECES', '15',
'FECAL', 'test', 'test-FECAL',
'FECAL'])
self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
'UBERON:SKIN', '37',
'SKIN', 'test', 'test-SKIN',
'SKIN'])
def test_add_alpha_diversity(self):
map_ = pd.DataFrame(
array([
['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5'],
['GAZ:left', '51.0', 'UBERON:FECES', '10'],
['GAZ:right', '12.0', 'UBERON_FECES', '15'],
['GAZ:stuff', '32.0', 'unknown', '26'],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37'],
]),
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI'],
index=['A', 'B', 'C', 'D', 'E']
)
alpha = {
'alpha_1': pd.DataFrame(
array([
['0', '1', '2', '3', '4'],
['100', '100', '100', '100', '100'],
[nan, nan, nan, nan, nan],
['14.5', '14.0', '15.1', '14.7', '14.4'],
['12.1', '15.2', '13.1', '14.1', '12.8'],
['16.2', '16.5', '16.9', '15.9', '16.2'],
['10.1', '9.8', '10.5', '10.0', '10.2'],
]),
columns=[
'alpha_rarefaction_100_0.txt',
'alpha_rarefaction_100_1.txt',
'alpha_rarefaction_100_2.txt',
'alpha_rarefaction_100_3.txt',
'alpha_rarefaction_100_4.txt',
],
index=['sequences per sample', 'iteration',
'A', 'B', 'C', 'D', 'E']
)
}
expected = pd.DataFrame(
array([
['GAZ:left', '51.0', 'UBERON:FECES', '10', 14.54],
['GAZ:right', '12.0', 'UBERON_FECES', '15', 13.46],
['GAZ:stuff', '32.0', 'unknown', '26', 16.34],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 10.12]
]),
index=['B', 'C', 'D', 'E'],
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'alpha_1']
)
expected['alpha_1'] = expected['alpha_1'].astype(float)
test = add_alpha_diversity(map_, alpha)
assert_frame_equal(expected, test)
def test_get_single_id_list(self):
map_ = pd.DataFrame(
array([
['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5', 'A',
'12'],
['GAZ:left', '51.0', 'UBERON:FECES', '10', 'B', '1500'],
['GAZ:right', '12.0', 'UBERON_FECES', '15', 'C', '121'],
['GAZ:stuff', '32.0', 'unknown', '26', 'D', '150'],
['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 'E', '201'],
]),
columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'HOST_SUBJECT_ID',
'depth'],
index=['A', 'B', 'C', 'D', 'E']
)
depths = [100]
test = get_single_id_lists(map_, depths)
known = {100: ['B', 'C', 'D', 'E'],
'unrare': ['A', 'B', 'C', 'D', 'E']}
self.assertEqual(test, known)
test_mapping = """#SampleIDs\tfoo\tbar
a\t1\t123123
b\tyy\txxx
c\tpoop\tdoesn't matter
"""
concat_test_input="""This is
a
test file that is used
in the concatenation test. The file will be concatenated to itself."""
test_fasta = """>seq1
0123456789
>seq2
0123456789AB
>seq3
012345"""
reformat_mapping_testdata = StringIO(
"""#SampleID COUNTRY AGE BODY_SITE BMI
A GAZ:w00t 43.0 UBERON_mucosa_of_tongue 5
B GAZ:left 51.0 UBERON:FECES 10
C GAZ:right 12.0 UBERON_FECES 15
D GAZ:stuff 32.0 unknown 26
E GAZ:stuff 56.0 UBERON:SKIN 37
""")
if __name__ == '__main__':
main()
| bsd-3-clause |
grantvk/aima-python | submissions/Fritz/myNN.py | 13 | 4756 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Fritz import medal_of_honor
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
honordata = DataFrame()
honordata.data = []
honortarget = []
class DataFrame2:
data2 = []
feature_names2 = []
target2 = []
target_names2 = []
honortarget2 = []
honordata2 = DataFrame2()
honordata2.data = []
medalofhonor = medal_of_honor.get_awardees(test=True)
for issued in medalofhonor:
try:
date = int(issued['birth']["date"]["year"])
honortarget.append(date)
date2 = int(issued['awarded']["date"]["month"])
honortarget2.append(date2)
day = int(issued['awarded']['date']['day'])
month = int(issued['awarded']['date']['month'])
year = int(issued['awarded']['date']['year'])
dayBorn = int(issued['birth']['date']['day'])
monthBorn = int(issued['birth']['date']['month'])
yearBorn = int(issued['birth']['date']['year'])
honordata.data.append([day, month, year])
honordata2.data.append([dayBorn, monthBorn, yearBorn])
except:
traceback.print_exc()
honordata.feature_names = [
'day',
'month',
'year',
]
honordata2.feature_names = [
'dayBorn',
'monthBorn',
'yearBorn',
]
honordata.target = []
honordata2.target = []
def targetdata(HDate):
if (HDate > 1880 and HDate != -1):
return 1
return 0
def targetdata2(HDate2):
if (HDate2 > 10 and HDate2 != -1):
return 1
return 0
for issued in honortarget:
TD = targetdata(issued)
honordata.target.append(TD)
honordata.target_names = [
'Born before 1880',
'Born after 1880',
]
for issued2 in honortarget2:
TD2 = targetdata2(issued2)
honordata2.target.append(TD2)
honordata2.target_names = [
'Awarded on or before October',
'Awarded after October',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
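# Only three keyword arguments above are active: stochastic gradient descent
# ('sgd'), an adaptive learning-rate schedule, and a raised iteration cap of
# 1000 (the default is 200); the commented lines record the remaining
# MLPClassifier defaults for reference.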
'''
Scaling the data.
'''
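# The helpers below implement plain column-wise min-max scaling:
#
#     scaled = (x - column_min) / (column_max - column_min)
#
# For example, a day value of 15 in a column that ranges from 1 to 31 maps
# to (15 - 1) / (31 - 1) = 0.466...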
dateScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(honordata.data)
dateScaled.data = scaleGrid(honordata.data)
dateScaled.feature_names = honordata.feature_names
dateScaled.target = honordata.target
dateScaled.target_names = honordata.target_names
dateScaled2 = DataFrame2()
def setupScales2(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid2(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled2 = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled2)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales2(honordata2.data)
dateScaled2.data = scaleGrid2(honordata2.data)
dateScaled2.feature_names = honordata2.feature_names
dateScaled2.target = honordata2.target
dateScaled2.target_names = honordata2.target_names
Examples = {
'Default Date':{
'frame': honordata,
},
'DateSGD': {
'frame': honordata,
'mlpc': mlpc
},
'dateScaled2': {
        'frame': dateScaled2,
},
}
| mit |
procoder317/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_categorical.py | 9 | 162878 | # -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
from datetime import datetime
from pandas.compat import range, lrange, u, PY3
import os
import pickle
import re
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp, CategoricalIndex
from pandas.core.config import option_context
import pandas.core.common as com
import pandas.compat as compat
import pandas.util.testing as tm
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
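        # Shared fixture: an ordered categorical with categories a < b < c,
        # reused by several of the tests below.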
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c),dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'),categories=list('abc'),ordered=False)
c2 = Categorical(list('aabca'),categories=list('cab'),ordered=False)
c3 = Categorical(list('aabca'),categories=list('cab'),ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a","b","c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c","b","a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1,2], [1,2,2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a","b"], ["a","b","b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1,2], [1,2,np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1, categories=["a","b","c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a","b","c","d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan,1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3 ])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3. ])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember the original type"
# feature to try to cast the array interface result to...
#vals = np.asarray(cat[cat.notnull()])
#self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=["a","b","c"])
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=[3,4,5])
        # the next ones are from the old docs, but unfortunately they don't trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
cat = Categorical([1,2], categories=[1,2,3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([],dtype='int64'),categories=[3,2,1],ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci.astype(object),categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull returned a scalar
# for a generator
from pandas.compat import range as xrange
exp = Categorical([0,1,2])
cat = Categorical((x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0,1,2], categories=(x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0,1,2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1,2], [1,2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1,2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0,1,2], ["a","a","b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2,1,2], ["a","b","c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a","b","c"], ordered=False)
res = Categorical.from_codes([0,1,2], ["a","b","c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0,1], 5, p=[0.9,0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a","b","c"], categories=["c","b","a"], ordered=True)
cat_rev_base = pd.Categorical(["b","b","b"], categories=["c","b","a"], ordered=True)
cat = pd.Categorical(["a","b","c"], ordered=True)
cat_base = pd.Categorical(["b","b","b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(["b","b","b"], categories=["c","b","a","d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on newer
# numpy versions
a = np.array(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in account
cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3/8., 2/8., 3/8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'], name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a","b","c","d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3/8., 2/8., 3/8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5/11., 3/11., 3/11.]},
index=pd.CategoricalIndex([1, 2, 3], name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan,1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1/4., 2/4., 1/4.]},
index=pd.CategoricalIndex([1, 2, np.nan], categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan], categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]],
columns=['counts','freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories'))
tm.assert_frame_equal(result,expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"], categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1/3.], [2, 2/3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result,expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]",
"Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a","b","c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a","b","c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1,2,3,4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a","b","c","a"])
exp = np.array([1,2,3,1])
s.categories = [1,2,3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1,2,3]))
# lengthen
def f():
s.categories = [1,2,3,4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1,2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0,1,2])
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a","c","b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a','b','c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b','c','a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a","c","b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a','b','c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b','c','a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a","b","c","a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
exp_categories = np.array(["c","b","a"])
exp_values = np.array(["a","b","c","a"])
res = cat.set_categories(["c","b","a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a","b","c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a","b","c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now np.nan
cat = Categorical(["a","b","c","a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0,-1,-1,0]))
# still not all "old" in "new"
res = cat.set_categories(["a","b","d"])
self.assert_numpy_array_equal(res.codes, np.array([0,1,-1,0]))
self.assert_numpy_array_equal(res.categories, np.array(["a","b","d"]))
# all "old" included in "new"
cat = cat.set_categories(["a","b","c","d"])
exp_categories = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1,2,3,4,1], categories=[1,2,3,4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0]))
self.assert_numpy_array_equal(c.categories , np.array([1,2,3,4] ))
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1] ))
c = c.set_categories([4,3,2,1]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3,2,1,0,3])) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4,3,2,1])) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1])) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4,3,2,1],ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4,3,2,1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a","b","c","a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1,2,3])
self.assert_numpy_array_equal(res.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(res.categories, np.array([1,2,3]))
self.assert_numpy_array_equal(cat.__array__(), np.array(["a","b","c","a"]))
self.assert_numpy_array_equal(cat.categories, np.array(["a","b","c"]))
res = cat.rename_categories([1,2,3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(cat.categories, np.array([1,2,3]))
# lengthen
def f():
cat.rename_categories([1,2,3,4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1,2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["c","b","a"], ordered=True)
# first inplace == False
res = cat.reorder_categories(["c","b","a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c","b","a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a","b","c","a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a","b","d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a","b","c","d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["a","b","c","d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b",np.nan,"a"], categories=["a","b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a","b","c","d","a"], categories=["a","b","c","d","e"])
exp_categories_all = np.array(["a","b","c","d","e"])
exp_categories_dropped = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a","b","c",np.nan], categories=["a","b","c","d","e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, np.array(["a","b","c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [ 2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a","b",np.nan,"a"])
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0]))
# If categories have nan included, the code should point to that instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])
self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0,2,2,0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a","b","c","a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
# Adding nan to the categories should make a newly assigned nan point to that category
c = Categorical(["a","b",np.nan,"a"])
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0,1,-1,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0,2,-1,0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT], [pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a","b",np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a","b",np.nan], categories=["a","b",np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a","b",np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a","b","c","a", np.nan])
exp = np.array([0,1,2,0,-1],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0,1,2,0,1],dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be writeable!
c[4] = "a"
exp = np.array([0,1,2,0,0],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0,1,2,0, 2],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a","b","c","d"], ordered=False)
self.assertRaises(TypeError, lambda : cat.min())
self.assertRaises(TypeError, lambda : cat.max())
cat = Categorical(["a","b","c","d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# unique() reorders the categories in order of appearance when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5,1], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan,np.nan,np.nan,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a","b","b","a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a","c","b","d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d","c","b","a"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort() sorts in place
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a","b","c","d","a","b","c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d","a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1,2,3])
exp = pd.Categorical([1,np.nan,3], categories=[1,2,3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0,3,2,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0,3,0,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0,1,3,2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
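# shifting fills the vacated positions with NaN and keeps the original categories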
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1,2,3])
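# nbytes should equal the size of the codes array plus the size of the categories' values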
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1,2,3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
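# memory_usage(deep=True) introspects the object categories, so it reports more than nbytes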
cat = pd.Categorical(['foo','foo','bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk' ])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
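# s2/c2 have 'donuts' appended out of sorted order, which the sorter argument below must account for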
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical.searchsorted returns an np.array like pd.Series does, unlike np.ndarray.searchsorted(), which returns a scalar
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017, whichever comes first
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017, whichever comes first
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0,1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1,2], ["a","b","c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
# ordering comparisons (<, >) with scalars not in the categories should raise, but
# equality comparisons (==, !=) should not
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4 , [False, False, False])
self.assert_numpy_array_equal(cat != 4 , [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
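# a categorical column should compare equal to 'category' only, not to 'object' or 'int64'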
index = ['cat','obj','num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False,True,False],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False,False,True],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True,False,False],index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
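# the codes dtype should grow with the number of categories (int8 -> int16 -> int32) and shrink back when categories are removed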
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000) ])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300) ])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype,'category')
self.assertEqual(len(s),len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A' : self.factor })
result = df['A']
tm.assert_series_equal(result,s)
result = df.iloc[:,0]
tm.assert_series_equal(result,s)
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A' : s })
result = df['A']
tm.assert_series_equal(result,s)
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A' : s, 'B' : s, 'C' : 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
# GH8623
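# scalar access on a categorical column should return the element itself, however it is accessed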
x = pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
columns=['person_id','person_name'])
x['person_name'] = pd.Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result,expected)
result = x.person_name[0]
self.assertEqual(result,expected)
result = x.person_name.loc[0]
self.assertEqual(result,expected)
def test_creation_astype(self):
l = ["a","b","c","a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1,2,3,1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats":[1,2,3,4,5,6], "vals":[1,2,3,4,5,6]})
cats = Categorical([1,2,3,4,5,6])
exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
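# keyword arguments to astype('category') should be passed through to the resulting categorical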
l = ["a","b","c","a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1,2,3,1]
exp = Series(l).astype('category')
res = Series(l,dtype='category')
tm.assert_series_equal(res, exp)
l = ["a","b","c","a"]
exp = Series(l).astype('category')
res = Series(l,dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
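# the constructed Series has a default integer index, so aligning to the date index yields all NaN while keeping the categories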
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({ 'A' : list('abc') }, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category')})
tm.assert_frame_equal(df,expected)
df = DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abd'))])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
1 : Series(list('abd'),dtype='category')},columns=[0,1])
tm.assert_frame_equal(df,expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')),list('def')])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
1 : list('def')},columns=[0,1])
tm.assert_frame_equal(df,expected)
# invalid (shape)
self.assertRaises(ValueError, lambda : DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError, lambda : pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo']*len(p.major_axis))
expected = DataFrame({'A' : c.copy(),
'B' : c.copy(),
'C' : c.copy(),
'D' : c.copy()},
columns=Index(list('ABCD'),name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an index with no overlapping labels gives all-NaN values but keeps the categories
s = Series(['a', 'b', 'c'],dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b','c'],categories=['a', 'b', 'c']))
expected.index = [1,2]
result = s.reindex([1,2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(values=['c',np.nan],categories=['a', 'b', 'c']))
expected.index = [2,3]
result = s.reindex([2,3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either the series or the
# categorical should not change the values in the other one, IF copy=True is specified!
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1,2,3]
exp_s = np.array([1,2,3,1])
exp_cat = np.array(["a","b","c","a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1,2,3]
exp_s = np.array([1,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in codes
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))
# If categories have nan included, the code should point to that instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]))
self.assert_numpy_array_equal(s2.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a","b","c","a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))
def test_cat_accessor(self):
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result,expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda : Series([1,2,3]).cat)
tm.assertRaisesRegexp(AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda : Series([1,2,3]).cat)
self.assertRaises(AttributeError, lambda : Series(['a','b','c']).cat)
self.assertRaises(AttributeError, lambda : Series(np.arange(5.)).cat)
self.assertRaises(AttributeError, lambda : Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered' and the
# methods '.set_categories()' and '.remove_unused_categories()' to the categorical
s = Series(Categorical(["a","b","c","a"], ordered=True))
exp_categories = np.array(["a","b","c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1,2,3]
exp_categories = np.array([1,2,3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0,1,2,0],dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a","b","c","a"], ordered=True))
exp_categories = np.array(["c","b","a"])
exp_values = np.array(["a","b","c","a"])
s = s.cat.set_categories(["c","b","a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a","b","b","a"], categories=["a","b","c"]))
exp_categories = np.array(["a","b"])
exp_values = np.array(["a","b","b","a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# set_categories() is easy to confuse with the .cat accessor method, so calling it directly on the Series should raise:
def f():
s.set_categories([4,3,2,1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
def test_assignment_to_dataframe(self):
# assignment
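# assigning either the raw Categorical (d) or the categorical Series (s) should give a category-dtype column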
df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D'])
tm.assert_series_equal(result,expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()],
index=['value','D','E'])
tm.assert_series_equal(result,expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns),1)
# In a frame, describe() for the cat should be the same as for string arrays (count, unique,
# top, freq)
cat = Categorical(["a","b","b","b"], categories=['a','b','c'], ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4,2,"b",3],index=['count','unique','top', 'freq'])
tm.assert_series_equal(result,expected)
cat = pd.Series(pd.Categorical(["a","b","c","c"]))
df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1,2,3,4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a","b"] *25))
exp = u("0 a\n1 b\n" + " ..\n" +
"48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(["a","b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" +
"dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp,a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2 ,3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2 ,3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3], ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n 2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2 ,3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2 ,3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2 ,3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2 ,3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
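# isnull(), info() and boolean subsetting should work on a frame with a category column without raising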
n = 2500
df = DataFrame({ 'int64' : np.random.randint(100,size=n) })
df['category'] = Series(np.array(list('abcdefghij')).take(np.random.randint(0,10,size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category']=='d']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
#self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a","b","c","d"], ordered=False))
self.assertRaises(TypeError, lambda : cat.min())
self.assertRaises(TypeError, lambda : cat.max())
cat = Series(Categorical(["a","b","c","d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([5,1], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(["a","b","c","c","c","b"], categories=["c","a","b","d"]))
res = s.value_counts(sort=False)
exp = Series([3,1,2,0], index=pd.CategoricalIndex(["c","a","b","d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3,2,1,0], index=pd.CategoricalIndex(["c","b","a","d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3], index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0], index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None],
categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True)
data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats})
expected = DataFrame({'a': Series([1, 2, 4, np.nan],
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True)
raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True)
df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A','B'])
expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],
index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) })
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo','bar']*2
gb = df.groupby(['A','B','C'])
expected = DataFrame({ 'values' :
Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'],
['c','d','y'],
['foo','bar']],
names=['A','B','C']))
}).sortlevel()
expected.iloc[[1,2,7,8],0] = [1,2,3,4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x=pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
columns=['person_id','person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x:x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0,1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0,1]].copy()
expected.index = Index([1,2],name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0,10,20,30,40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0,10,20,30,40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0], index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True)
raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True)
df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],
index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a","b","b","a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(Categorical(["a","a","b","b"], ordered=False),index=[0,3,1,2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a","c","b","d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d","c","b","a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a","b","c","d"], categories=["a","b","c","d"], ordered=False)
raw_cat2 = Categorical(["a","b","c","d"], categories=["d","c","b","a"], ordered=True)
s = ["a","b","c","d"]
df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id":[6,5,4,3,2,1], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1,2,5,0,3,4]]
tm.assert_frame_equal(result,expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2,1,5,4,3,0]]
tm.assert_frame_equal(result,expected)
# reverse
cat = Categorical(["a","c","c","b","d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d","c", "c", "b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1,2,3,4]))
reversed = cat[::-1]
exp = np.array([4,3,2,1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100)+1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0,25,50,75,100])
expected = Series([11,'(0, 25]'], index=['value','D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11,21).astype('int64')},
index=np.arange(10,20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0,25,50,75,100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9,'(0, 25]'],index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(["a","c","b","c","c","c","c"], categories=["a","b","c"])
idx = pd.Index(["h","i","j","k","l","m","n"])
values= [1,2,3,4,5,6,7]
df = pd.DataFrame({"cats":cats,"values":values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b","c"], categories=["a","b","c"])
idx2 = pd.Index(["j","k"])
values2= [3,4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats,index=idx,name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b",3], index=["cats","values"], dtype="object", name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4,:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2,:]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:,0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2,0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k",:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j",:]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:,"cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j","cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
#res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k",:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j",:]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:,"cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j",0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2,0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j","cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy,exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy,exp_fancy)
# get_value
res_val = df.get_value("j","cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2,4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2,3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:,0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:,slice(0,2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:,[0,1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
#GH 7918
cats = Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c"])
idx = Index(["h","i","j","k","l","m","n",])
values= [1,2,2,2,3,4,5]
df = DataFrame({"cats":cats,"values":values}, index=idx)
result = df.iloc[2:4,:]
expected = DataFrame({"cats":Categorical(['b','b'],categories=['a','b','c']),"values":[2,2]}, index=['j','k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4,:].dtypes
expected = Series(['category','int64'],['cats','values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j","cats"]
expected = Series(Categorical(['a','b','b'],
categories=['a','b','c']), index=['h','i','j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j",0:1]
expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j']) })
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# - assign multiple rows (mixed values) (-> array) -> exp_multi_row
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
cats = pd.Categorical(["a","a","a","a","a","a","a"], categories=["a","b"])
idx = pd.Index(["h","i","j","k","l","m","n"])
values = [1,1,1,1,1,1,1]
orig = pd.DataFrame({"cats":cats,"values":values}, index=idx)
### the expected values
# changed single row
cats1 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
idx1 = pd.Index(["h","i","j","k","l","m","n"])
values1 = [1,1,2,1,1,1,1]
exp_single_row = pd.DataFrame({"cats":cats1,"values":values1}, index=idx1)
#changed multiple rows
cats2 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
idx2 = pd.Index(["h","i","j","k","l","m","n"])
values2 = [1,1,2,2,1,1,1]
exp_multi_row = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
idx3 = pd.Index(["h","i","j","k","l","m","n"])
values3 = [1,1,1,1,1,1,1]
exp_parts_cats_col = pd.DataFrame({"cats":cats3,"values":values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
idx4 = pd.Index(["h","i","j","k","l","m","n"])
values4 = [1,1,1,1,1,1,1]
exp_single_cats_value = pd.DataFrame({"cats":cats4,"values":values4}, index=idx4)
#### iloc #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2,0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2,0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2,:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2,:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4,:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4,:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.iloc[2:4,0] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4,0] = ["c","c"]
#### loc #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j","cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j",:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j",:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k",:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k",:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.loc["j":"k","cats"] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k","cats"] = ["c","c"]
#### ix #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j",0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j",:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j",:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k",:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k",:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.ix["j":"k",0] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k",0] = ["c","c"]
# iat
df = orig.copy()
df.iat[2,0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2,0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j","cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(["a","a","c","c","a","a","a"], categories=["a","b","c"])
idxf = pd.Index(["h","i","j","k","l","m","n"])
valuesf = [1,1,3,3,1,1,1]
df = pd.DataFrame({"cats":catsf,"values":valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a","b","c"], inplace=True)
df[df["cats"] == "c"] = ["b",2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j","cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j","cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Categorical to parts of an int/... column uses the values of the Categorical
df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]})
exp = pd.DataFrame({"a":[1,"b","b",1,1], "b":["a","a","b","b","a"]})
df.loc[1:2,"a"] = pd.Categorical(["b","b"], categories=["a","b"])
df.loc[2:3,"b"] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp)
######### Series ##########
orig = Series(pd.Categorical(["b","b"], categories=["a","b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1,2,3]))
exp = Series(Categorical([1,np.nan,3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1,2,3], [3,2,1], [2,2,2])]
for data , reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse, ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse, ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# a categorical cannot be compared to a Series or a numpy array, and not the
# other way around either
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# inequality comparisons (<, >) should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
# comparisons with scalars not in categories should raise for ordering comparisons
# (<, >), but not for equality / inequality (==, !=)
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d" , Series([False, False, False]))
self.assert_series_equal(cat != "d" , Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a","b","c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'),dtype="category")
b = Series(list('abc'),dtype="object")
c = Series(['a','b','cc'],dtype="object")
d = Series(list('acb'),dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a=='a').all())
self.assertTrue(((a!='a') == ~(a=='a')).all())
self.assertFalse(('a'==a).all())
self.assertTrue((a=='a')[0])
self.assertTrue(('a'==a)[0])
self.assertFalse(('a'!=a)[0])
# vs list-like
self.assertTrue((a==a).all())
self.assertFalse((a!=a).all())
self.assertTrue((a==list(a)).all())
self.assertTrue((a==b).all())
self.assertTrue((b==a).all())
self.assertTrue(((~(a==b))==(a!=b)).all())
self.assertTrue(((~(b==a))==(b!=a)).all())
self.assertFalse((a==c).all())
self.assertFalse((c==a).all())
self.assertFalse((a==d).all())
self.assertFalse((d==a).all())
# vs a cat-like
self.assertTrue((a==e).all())
self.assertTrue((e==a).all())
self.assertFalse((a==f).all())
self.assertFalse((f==a).all())
self.assertTrue(((~(a==e)==(a!=e)).all()))
self.assertTrue(((~(e==a)==(e!=a)).all()))
self.assertTrue(((~(a==f)==(a!=f)).all()))
self.assertTrue(((~(f==a)==(f!=a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a","b"], categories=["a","b"])
vals = [1,2]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"])
vals2 = [1,2,1,2]
exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df,df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same categories
cat3 = pd.Categorical(["a","b"], categories=["a","b","c"])
vals3 = [1,2]
df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3})
def f():
pd.concat([df,df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories, df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories, df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'),dtype='category')
s2 = Series(list('abd'),dtype='category')
def f():
pd.concat([s,s2])
self.assertRaises(ValueError, f)
result = pd.concat([s,s],ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s,s])
expected = Series(list('abcabc'),index=[0,1,2,0,1,2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6,dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) })
result = pd.concat([df2,df2])
expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) })
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6,dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }).set_index('B')
result = pd.concat([df2,df2])
expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }).set_index('B')
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('abc')) }).set_index('B')
self.assertRaises(TypeError, lambda : pd.concat([df2,df3]))
def test_append(self):
cat = pd.Categorical(["a","b"], categories=["a","b"])
vals = [1,2]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"])
vals2 = [1,2,1,2]
exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same categories
cat3 = pd.Categorical(["a","b"], categories=["a","b","c"])
vals3 = [1,2]
df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'},
'd': {0: 'null', 1: 'null', 2: 'null', 3: 'null', 4: 'null'}})
left = DataFrame({'a': {0: 'f', 1: 'f', 2: 'f', 3: 'f', 4: 'f'},
'b': {0: 'g', 1: 'g', 2: 'g', 3: 'g', 4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
#GH10183
cat = pd.Categorical(["a","b"], categories=["a","b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a","b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
vals = ["a","b",np.nan,"d"]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical([1,2,3,3], categories=[1,2,3])
vals2 = ["a","b","b","d"]
df_exp_fill = pd.DataFrame({"cats":cat2, "vals":vals2})
cat3 = pd.Categorical([1,2,3], categories=[1,2,3])
vals3 = ["a","b",np.nan]
df_exp_drop_cats = pd.DataFrame({"cats":cat3, "vals":vals3})
cat4 = pd.Categorical([1,2], categories=[1,2,3])
vals4 = ["a","b"]
df_exp_drop_all = pd.DataFrame({"cats":cat4, "vals":vals4})
# fillna
res = df.fillna(value={"cats":3, "vals":"b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats":4, "vals":"c"})
self.assertRaises(ValueError, f)
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes both missing values and NA categories into account
c = Categorical(["a","b",np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
df = pd.DataFrame({"cats":c, "vals":[1,2,3]})
df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'),expected)
tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected)
self.assertRaises(ValueError, lambda : s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
exp2 = Series([1,2,3,4]).astype(int)
tm.assert_series_equal(s2.astype('int') , exp2)
# objects don't sort correctly, so just compare that we have the same values
def cmp(a,b):
tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b)))
expected = Series(np.array(s.values),name='value_group')
cmp(s.astype('object'),expected)
cmp(s.astype(np.object_),expected)
# array conversion
tm.assert_almost_equal(np.array(s),np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(com.CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(com.CategoricalDtype())]:
result = valid(s)
tm.assert_series_equal(result,s)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
self.assertRaises(TypeError, lambda : invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat,cat.astype('category'))
tm.assert_almost_equal(np.array(cat),cat.astype('object'))
self.assertRaises(ValueError, lambda : cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({ 'A' : list('abc') }, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__','__sub__','__mul__','__truediv__']:
self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g. min/max)
s = self.cat['value_group']
for op in ['kurt','skew','var','std','mean','sum','median']:
self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False))
# mad technically works because it always operates on the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1,2,3,4]))
self.assertRaises(TypeError, lambda : np.sum(s))
# numeric ops on a Series
for op in ['__add__','__sub__','__mul__','__truediv__']:
self.assertRaises(TypeError, lambda : getattr(s,op)(2))
# invalid ufunc
self.assertRaises(TypeError, lambda : np.log(s))
def test_cat_tab_completition(self):
# test the tab completion display
ok_for_cat = ['categories','codes','ordered','set_categories',
'add_categories', 'remove_categories', 'rename_categories',
'reorder_categories', 'remove_unused_categories',
'as_ordered', 'as_unordered']
def get_dir(s):
results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError, "You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
self.assertIsInstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
('match', ("a",), {}), # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0,1), {}),
('slice_replace', (0,1,"z"), {}),
('split', (" ",), {"expand":False}), #default
('split', (" ",), {"expand":True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
# * get, join: they need individual elements of type list, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f for f in dir(s.str) if not (f.startswith("_") or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1,2,3]).astype('category')
with tm.assertRaisesRegexp(AttributeError, "Can only use .str accessor with string"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days','10 days'))
c_tdr = s_tdr.astype("category")
test_data = [
("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
#('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize']
for name, attr_names, s, c in test_data:
func_names = [f for f in dir(s.dt) if not (f.startswith("_") or
f in attr_names or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
invalid = Series([1,2,3]).astype('category')
with tm.assertRaisesRegexp(AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
self.assertFalse(hasattr(invalid, 'str'))
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_concat_categorical(self):
# See GH 10177
df1 = pd.DataFrame(np.arange(18, dtype='int64').reshape(6, 3), columns=["a", "b", "c"])
df2 = pd.DataFrame(np.arange(14, dtype='int64').reshape(7, 2), columns=["a", "c"])
df2['h'] = pd.Series(pd.Categorical(["one", "one", "two", "one", "two", "two", "one"]))
df_concat = pd.concat((df1, df2), axis=0).reset_index(drop=True)
df_expected = pd.DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13]})
df_expected['h'] = pd.Series(pd.Categorical([None, None, None, None, None, None,
"one", "one", "two", "one", "two", "two", "one"]))
tm.assert_frame_equal(df_expected, df_concat)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core']
exit=False)
| artistic-2.0 |
pp-mo/iris | docs/iris/example_code/Oceanography/atlantic_profiles.py | 2 | 3469 | """
Oceanographic profiles and T-S diagrams
=======================================
This example demonstrates how to plot vertical profiles of different
variables in the same axes, and how to make a scatter plot of two
variables. There is an oceanographic theme but the same techniques are
equally applicable to atmospheric or other kinds of data.
The data used are profiles of potential temperature and salinity in the
Equatorial and South Atlantic, output from an ocean model.
The y-axis of the first plot produced will be automatically inverted due to the
presence of the attribute positive=down on the depth coordinate. This means
depth values intuitively increase downward on the y-axis.
"""
import iris
import iris.iterate
import iris.plot as iplt
import matplotlib.pyplot as plt
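# As a small aside (an illustrative sketch added here, not part of the
# original example), the profile extraction inside main() below works by
# combining iris.Constraint objects with "&"; the combined constraint can
# then be passed straight to cube.extract(). The values simply mirror the
# ones used in main().
_example_constraint = iris.Constraint(longitude=330.5) & iris.Constraint(
    depth=lambda d: d <= 1000
)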
def main():
# Load the gridded temperature and salinity data.
fname = iris.sample_data_path("atlantic_profiles.nc")
cubes = iris.load(fname)
(theta,) = cubes.extract("sea_water_potential_temperature")
(salinity,) = cubes.extract("sea_water_practical_salinity")
# Extract profiles of temperature and salinity from a particular point in
# the southern portion of the domain, and limit the depth of the profile
# to 1000m.
lon_cons = iris.Constraint(longitude=330.5)
lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
# Plot these profiles on the same set of axes. In each case we call plot
# with two arguments, the cube followed by the depth coordinate. Putting
# them in this order places the depth coordinate on the y-axis.
# The first plot is in the default axes. We'll use the same color for the
# curve and its axes/tick labels.
plt.figure(figsize=(5, 6))
temperature_color = (0.3, 0.4, 0.5)
ax1 = plt.gca()
iplt.plot(
theta_1000m,
theta_1000m.coord("depth"),
linewidth=2,
color=temperature_color,
alpha=0.75,
)
ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
ax1.set_ylabel("Depth / m")
for ticklabel in ax1.get_xticklabels():
ticklabel.set_color(temperature_color)
# To plot salinity in the same axes we use twiny(). We'll use a different
# color to identify salinity.
salinity_color = (0.6, 0.1, 0.15)
ax2 = plt.gca().twiny()
iplt.plot(
salinity_1000m,
salinity_1000m.coord("depth"),
linewidth=2,
color=salinity_color,
alpha=0.75,
)
ax2.set_xlabel("Salinity / PSU", color=salinity_color)
for ticklabel in ax2.get_xticklabels():
ticklabel.set_color(salinity_color)
plt.tight_layout()
iplt.show()
# Now plot a T-S diagram using scatter. We'll use all the profiles here,
# and each point will be coloured according to its depth.
plt.figure(figsize=(6, 6))
depth_values = theta.coord("depth").points
for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
ax = plt.gca()
ax.set_xlabel("Salinity / PSU")
ax.set_ylabel("Potential Temperature / K")
cb = plt.colorbar(orientation="horizontal")
cb.set_label("Depth / m")
plt.tight_layout()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_stats_cluster_time_frequency_repeated_measures_anova.py | 7 | 10025 | """
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to a single channel that we know exposes
a strong induced response. We will then visualize each effect by
creating a corresponding mass-univariate effect image. We conclude
by accounting for multiple comparisons, performing a permutation
clustering test with the ANOVA as the clustering function. The final
results will be compared with multiple-comparison correction based
on the False Discovery Rate.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
epochs.pick_channels([ch_name]) # restrict example to one channel
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix; imbalances are not handled
# gracefully and inflate the risk of type-I errors.
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False # don't correct morlet wavelet to be of mean zero
# For a true wavelet, zero_mean should be True, but here, for illustration
# purposes, leaving it False helps to spot the evoked response.
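###############################################################################
# A brief illustrative aside (an added sketch, not part of the original
# recipe): because n_cycles grows proportionally with frequency, the wavelet
# duration n_cycles / freqs stays constant (1 / 7 s here), so the temporal
# smoothing is roughly the same across all frequencies of interest.
print(freqs)
print(n_cycles / freqs)  # constant wavelet duration in seconds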
###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
decim=decim, average=False, zero_mean=zero_mean,
return_itc=False)
this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
this_power = this_tfr.data[:, 0, :, :] # we only have one channel.
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument, which is a list with the number
# of levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
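###############################################################################
# To make the factor coding concrete (an added illustration; the tuples below
# are introduced only for explanation), with A = modality and B = location the
# four cells of the 2 x 2 design enumerate as A1B1, A1B2, A2B1, A2B2 --
# matching the insertion order of event_id (aud_l, aud_r, vis_l, vis_r).
design_cells = [(a, b)
                for a in ('auditory', 'visual')
                for b in ('left', 'right')]
print(design_cells)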
###############################################################################
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_freqs * n_times)
# so we have replications * conditions * observations:
print(data.shape)
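###############################################################################
# As a small sanity check (an added sketch, not part of the original recipe),
# each slice data[:, i, :] should simply be the i-th condition's single-trial
# power, flattened over the frequency and time axes.
for i, this_power in enumerate(epochs_power):
    assert np.array_equal(data[:, i, :],
                          this_power.reshape(n_replications, -1))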
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
#     trial A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time-locked responses despite the 'induced' approach.
# For an analysis of induced power at the group level, averaged TFRs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect = np.ma.masked_array(effect, [sig > .05])
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. We also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as a flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple of f-values and p-values; we will pick the former.
pthresh = 0.001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None)
###############################################################################
# Create new stats image with only significant clusters:
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
np.invert(clusters[np.squeeze(good_clusters)]))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" cluster-level corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Now using FDR:
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" FDR corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Both cluster-level and FDR correction help get rid of the
# putative false-positive spots we saw in the naive f-images.
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/frame/test_nonunique_indexes.py | 2 | 18505 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas.compat import lrange, u
from pandas import DataFrame, Series, MultiIndex, date_range
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range('20130101', periods=4, freq='Q-NOV')
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['a', 'a', 'a', 'a'])
df.columns = idx
expected = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['foo', 'bar', 'foo', 'hello'])
df['string'] = 'bah'
expected = DataFrame([[1, 1, 1, 5, 'bah'], [1, 1, 2, 5, 'bah'],
[2, 1, 3, 5, 'bah']],
columns=['foo', 'bar', 'foo', 'hello', 'string'])
check(df, expected)
with tm.assert_raises_regex(ValueError, 'Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1, 1, 1, 5, 'bah', 3], [1, 1, 2, 5, 'bah', 3],
[2, 1, 3, 5, 'bah', 3]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1, 1, 1, 5, 'bah', 4], [1, 1, 2, 5, 'bah', 4],
[2, 1, 3, 5, 'bah', 4]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1, 1, 5, 'bah', 3], [1, 2, 5, 'bah', 3],
[2, 3, 5, 'bah', 3]],
columns=['foo', 'foo', 'hello', 'string', 'foo2'])
check(df, expected)
        # try to delete again (it's not consolidated)
del df['hello']
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# insert
df.insert(2, 'new_col', 5.)
expected = DataFrame([[1, 1, 5., 'bah', 3], [1, 2, 5., 'bah', 3],
[2, 3, 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col', 'string',
'foo2'])
check(df, expected)
# insert a dup
tm.assert_raises_regex(ValueError, 'cannot insert',
df.insert, 2, 'new_col', 4.)
df.insert(2, 'new_col', 4., allow_duplicates=True)
expected = DataFrame([[1, 1, 4., 5., 'bah', 3],
[1, 2, 4., 5., 'bah', 3],
[2, 3, 4., 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col',
'new_col', 'string', 'foo2'])
check(df, expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4., 5., 'bah', 3], [4., 5., 'bah', 3],
[4., 5., 'bah', 3]],
columns=['new_col', 'new_col', 'string', 'foo2'])
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame([[1, 1, 1., 5], [1, 1, 2., 5], [2, 1, 3., 5]],
columns=['foo', 'bar', 'foo', 'hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1, 1, 1., 5, 7.], [1, 1, 2., 5, 7.],
[2, 1, 3., 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
result = df['foo']
expected = DataFrame([[1, 1.], [1, 2.], [2, 3.]],
columns=['foo', 'foo'])
check(result, expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
del df['foo']
expected = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], columns=[
'bar', 'hello', 'foo2'])
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=['x', 'x'])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{'TClose': [22.02],
'RT': [0.0454],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'STK_ID': [600809] * 3,
'RPT_Date': [20120930, 20121231, 20130331],
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples(
[(600809, 20120930),
(600809, 20121231),
(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4, df5, how='inner', left_index=True, right_index=True)
result = k.rename(
columns={'TClose_x': 'TClose', 'TClose_y': 'QT_Close'})
str(result)
result.dtypes
expected = (DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809,
u('饡驦'), 30.01]],
columns=['RT', 'TClose', 'TExg',
'RPT_Date', 'STK_ID', 'STK_Name',
'QT_Close'])
.set_index(['STK_ID', 'RPT_Date'], drop=False))
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
pytest.raises(ValueError, df.reindex, columns=['bar'])
pytest.raises(ValueError, df.reindex, columns=['bar', 'foo'])
# drop
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
result = df.drop(['a'], axis=1)
expected = DataFrame([[1], [1], [1]], columns=['bar'])
check(result, expected)
result = df.drop('a', axis=1)
check(result, expected)
# describe
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['bar', 'a', 'a'], dtype='float64')
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame.from_items([('A', expected_ser),
('B', this_df['B']),
('A', expected_ser)])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__', '__mul__', '__sub__', '__truediv__']:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ['A', 'A']
df.columns = ['A', 'A']
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop('C', axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=['A', 'B', 'C'], how='all')
expected.columns = ['A', 'A', 'B', 'C']
df.columns = ['A', 'A', 'B', 'C']
result = df.dropna(subset=['A', 'C'], how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
pytest.raises(ValueError, lambda: df[df.A > 6])
        # dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]],
columns=['A', 'B'])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]],
columns=['A', 'A'])
# not-comparing like-labelled
pytest.raises(ValueError, lambda: df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False, True], [True, False], [False, False], [
True, False]], columns=['A', 'A'])
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one': Series([True, True, False],
index=['a', 'b', 'c']),
'two': Series([False, False, True, False],
index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True],
index=['a', 'b', 'c', 'd'])})
expected = pd.concat(
[dfbool['one'], dfbool['three'], dfbool['one']], axis=1)
result = dfbool[['one', 'three', 'one']]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.loc[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.loc[['a', 'c', 'a']]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': np.arange(1, 6, dtype='int64')},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame({'A': date_range('20130101', periods=5),
'B': date_range('20130101 09:00:00', periods=5)},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(pd.Timedelta('9 hours'), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['a', 'a.1']
str(df)
expected = DataFrame([[1, 2]], columns=['a', 'a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a'])
df.columns = ['b', 'a', 'a.1']
str(df)
expected = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['b', 'b']
str(df)
expected = DataFrame([[1, 2]], columns=['b', 'b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=['a', 'a', 'b', 'b', 'd', 'c', 'c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame(
[[1, 2, 1., 2., 3., 'foo', 'bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
pytest.raises(Exception, lambda x: DataFrame(
[[1, 2, 'foo', 'bar']], columns=['a', 'a', 'a', 'a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype='float64')
df_int = DataFrame(np.random.randn(10, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index,
columns=df_float.columns)
df_object = DataFrame('foo', index=df_float.index,
columns=df_float.columns)
df_dt = DataFrame(pd.Timestamp('20010101'),
index=df_float.index,
columns=df_float.columns)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list('AAA')
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, 'A', ['g', 'h', 'i'], allow_duplicates=True)
df.insert(0, 'A', ['d', 'e', 'f'], allow_duplicates=True)
df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
exp = pd.DataFrame([['a', 'd', 'g'], ['b', 'e', 'h'],
['c', 'f', 'i']], columns=['A', 'A', 'A'])
assert_frame_equal(df, exp)
| bsd-3-clause |
tmeits/pybrain | pybrain/tools/neuralnets.py | 26 | 13763 | # Neural network data analysis tool collection. Makes heavy use of the logging module.
# Can generate training curves during the run (from properly setup IPython and/or with
# TkAgg backend and interactive mode - see matplotlib documentation).
__author__ = "Martin Felder"
__version__ = "$Id$"
from pylab import ion, figure, draw
import csv
from numpy import Infinity
import logging
from pybrain.datasets import ClassificationDataSet, SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer, RPropMinusTrainer, Trainer
from pybrain.structure import SoftmaxLayer, LSTMLayer
from pybrain.utilities import setAllArgs
from pybrain.tools.plotting import MultilinePlotter
from pybrain.tools.validation import testOnSequenceData, ModuleValidator, Validator
from pybrain.tools.customxml import NetworkWriter
class NNtools(object):
""" Abstract class providing basic functionality to make neural network training more comfortable """
def __init__(self, DS, **kwargs):
""" Initialize with the training data set DS. All keywords given are set as member variables.
The following are particularly important:
:key hidden: number of hidden units
:key TDS: test data set for checking convergence
:key VDS: validation data set for final performance evaluation
:key epoinc: number of epochs to train for, before checking convergence (default: 5)
"""
self.DS = DS
self.hidden = 10
self.maxepochs = 1000
self.Graph = None
self.TDS = None
self.VDS = None
self.epoinc = 5
setAllArgs(self, kwargs)
self.trainCurve = None
def initGraphics(self, ymax=10, xmax= -1):
""" initialize the interactive graphics output window, and return a handle to the plot """
if xmax < 0:
xmax = self.maxepochs
figure(figsize=[12, 8])
ion()
draw()
#self.Graph = MultilinePlotter(autoscale=1.2 ) #xlim=[0, self.maxepochs], ylim=[0, ymax])
self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax])
self.Graph.setLineStyle([0, 1], linewidth=2)
return self.Graph
def set(self, **kwargs):
""" convenience method to set several member variables at once """
setAllArgs(self, kwargs)
def saveTrainingCurve(self, learnfname):
""" save the training curves into a file with the given name (CSV format) """
logging.info('Saving training curves into ' + learnfname)
if self.trainCurve is None:
            logging.error('No training curve available for saving!')
            return
learnf = open(learnfname, "wb")
writer = csv.writer(learnf, dialect='excel')
nDataSets = len(self.trainCurve)
for i in range(1, len(self.trainCurve[0]) - 1):
writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)])
learnf.close()
def saveNetwork(self, fname):
""" save the trained network to a file """
NetworkWriter.writeToFile(self.Trainer.module, fname)
logging.info("Network saved to: " + fname)
#=======================================================================================================
class NNregression(NNtools):
""" Learns to numerically predict the targets of a set of data, with optional online progress plots. """
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class. """
if hidden is not None:
self.hidden = hidden
logging.info("Constructing FNN with following config:")
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim)
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Training FNN with following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly.
CAVEAT: No support for Sequential datasets!"""
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='normalized regression error')
self.Graph.setLegend(['training', 'test'], loc='upper right')
epoch = 0
inc = self.epoinc
best_error = Infinity
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
learncurve_y.append(err_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn))
else:
# calculate same errors on TEST data
err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
valcurve_y.append(err_tst)
if err_tst < best_error:
# store best error and parameters
best_epoch = epoch
best_error = err_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, err_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoch increments)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, err_trn)
self.Graph.update()
# training finished!
logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
if self.VDS is not None:
# calculate same errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
logging.info("Result on evaluation data: %10g" % err_val)
# store training curve for saving into file
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
#=======================================================================================================
class NNclassifier(NNtools):
""" Learns to classify a set of data, with optional online progress plots. """
def __init__(self, DS, **kwargs):
""" Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables. """
if not isinstance(DS, ClassificationDataSet):
raise TypeError('Need a ClassificationDataSet to do classification!')
NNtools.__init__(self, DS, **kwargs)
self.nClasses = self.DS.nClasses # need this because targets may be altered later
self.clsnames = None
self.targetsAreOneOfMany = False
def _convertAllDataToOneOfMany(self, values=[0, 1]):
""" converts all datasets associated with self into 1-out-of-many representations,
e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0],
or accordingly with other upper and lower bounds, as given by the values keyword """
if self.targetsAreOneOfMany:
return
else:
# convert all datasets to one-of-many ("winner takes all") representation
for dsname in ["DS", "TDS", "VDS"]:
d = getattr(self, dsname)
if d is not None:
if d.outdim < d.nClasses:
d._convertToOneOfMany(values)
self.targetsAreOneOfMany = True
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Setup FNN and trainer for classification. """
self._convertAllDataToOneOfMany()
if hidden is not None:
self.hidden = hidden
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer)
logging.info("Constructing classification FNN with following config:")
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs):
""" Setup an LSTM RNN and trainer for sequence classification. """
if hidden is not None:
self.hidden = hidden
self._convertAllDataToOneOfMany()
RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, hiddenclass=LSTMLayer,
recurrent=True, outclass=SoftmaxLayer)
logging.info("Constructing classification RNN with following config:")
logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(RNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly. """
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='% classification error')
self.Graph.setLegend(['training', 'test'], loc='lower right')
epoch = 0
inc = self.epoinc
best_error = 100.0
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
if isinstance(self.DS, SequentialDataSet):
r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True)
r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
learncurve_y.append(r_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn))
else:
# calculate errors on TEST data
if isinstance(self.DS, SequentialDataSet):
r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS)
r_tst = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
valcurve_y.append(r_tst)
if r_tst < best_error:
best_epoch = epoch
best_error = r_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, r_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoch increments)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, r_trn)
self.Graph.update()
logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error))
if self.VDS is not None:
# calculate errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
if isinstance(self.DS, SequentialDataSet):
r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS))
else:
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS)
r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
logging.info("Result on evaluation data: %5.2f%%" % r_val)
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/benchmarks/bench_tree.py | 1 | 3618 | """
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks.
First, we increase the number of samples in the training set
and plot the time taken to fit and predict as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import gc
from datetime import datetime
import numpy as np
import pylab as pl
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| mit |
sachintyagi22/spark | python/pyspark/sql/session.py | 7 | 25220 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ZENGXH/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to the :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
The distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
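# Illustrative usage note (a hedged sketch, not from the original source): in
# the implementation above, a query point with no training samples within
# `radius` gets an empty neighbor list, so the unweighted mean is NaN and
# NumPy warns about a mean of an empty slice.
#
#     from sklearn.neighbors import RadiusNeighborsRegressor
#
#     reg = RadiusNeighborsRegressor(radius=1.0).fit([[0], [1], [2], [3]],
#                                                    [0, 0, 1, 1])
#     reg.predict([[1.5]])   # neighbors at x=1 and x=2 -> 0.5
#     reg.predict([[10.0]])  # no neighbors within radius -> NaN (with warning)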
| bsd-3-clause |
HUGG/NGWM2016-modelling-course | Lessons/04-Basic-fluid-mechanics/scripts/1D-asthenospheric-counterflow.py | 1 | 1766 | # -*- coding: utf-8 -*-
"""
1D-asthenospheric-counterflow.py
A script for plotting velocity magnitudes for 1D counterflow in the
asthenosphere.
dwhipp 01.16
"""
#--- User-defined input variables
hl = 100.0 # Thickness of lithosphere [km]
h = 200.0 # Thickness of asthenosphere [km]
u0 = 15.0 # Lithospheric plate velocity [mm/a]
numpts = 101 # Number of points to calculate velocity across channel
#--- End user-defined input
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
# Convert inputs to SI values
h = h * 1000.0 # [km] -> [m]
hl = hl * 1000.0 # [km] -> [m]
u0 = u0 / 1000.0 / 365.25 / 24.0 / 3600.0 # [mm/a] -> [m/s]
# Define channel arrays
y = np.linspace(0.0,h,numpts)
u = np.zeros(numpts)
# Loop across all values of y and define velocity
for i in range(numpts):
# Insert equation for asthenospheric counterflow below
u[i] = ????
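    # One possible expression (a hedged sketch, not the exercise solution key):
    # the standard asthenospheric counterflow profile for a channel with a
    # no-slip fixed base and zero net horizontal flux through the combined
    # lithosphere-asthenosphere column, with y measured downward from the
    # base of the lithosphere:
    #
    #   u[i] = u0 * (1.0 - 4.0 * (y[i] / h) + 3.0 * (y[i] / h)**2
    #                + 6.0 * (hl / h) * ((y[i] / h)**2 - y[i] / h))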
# Rescale values of y and u for plotting
y = y / 1000.0
u = u * 1000.0 * 365.25 * 24.0 * 3600.0
# Create figure for plotting
plt.figure()
# Make plot
plt.plot(u,y,'ko-')
# Invert y axis
plt.gca().invert_yaxis()
# Add text label with thickness of lithospheric plate
plt.text(????)
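# One way to fill this in (coordinates and wording are assumptions, not part
# of the original exercise); hl is in metres here, so divide by 1000 for km:
#
#   plt.text(0.5 * u.max(), 5.0, "Lithosphere thickness: %.0f km" % (hl / 1000.0))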
# Label axes and add title
plt.xlabel("Flow velocity [mm/a]")
plt.ylabel("Distance across channel [km]")
plt.title("Asthenospheric counterflow")
# Show plot
plt.show() | mit |